review.fuel-infra Code Review - packages/trusty/rabbitmq-server.git/commitdiff
Upgrade rabbitmq to 3.6.5 44/24844/11
author: Alexey Lebedeff <alebedev@mirantis.com>
Thu, 11 Aug 2016 10:55:15 +0000 (13:55 +0300)
committer: Alexey Lebedeff <alebedev@mirantis.com>
Fri, 19 Aug 2016 17:18:27 +0000 (20:18 +0300)
As part of https://mirantis.jira.com/browse/PROD-6598

With additionally backported patches for https://review.openstack.org/#/c/355477/

Change-Id: I56ed7cd35c40b62dfbd09636d58bb6734d2f4c00

648 files changed:
debian/changelog
debian/control
debian/patches/erlang-thread-pool-autotune.patch [deleted file]
debian/patches/health-check-rabbit-node-monitor-rmq-pr-915.patch [new file with mode: 0644]
debian/patches/list-only-local-queues-rmq-pr-911.patch [new file with mode: 0644]
debian/patches/native-code-path.patch
debian/patches/rabbitmq-probe-ephemeral-port.patch [deleted file]
debian/patches/series
debian/patches/zero-deps-systemd-1.patch [deleted file]
debian/rabbitmq-server.init [changed mode: 0644->0755]
debian/rabbitmq-server.postinst
debian/rabbitmq-server.service
debian/rules
rabbitmq-server/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/CONTRIBUTING.md
rabbitmq-server/Makefile
rabbitmq-server/README.md
rabbitmq-server/build.config [deleted file]
rabbitmq-server/deps/amqp_client/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/amqp_client/CONTRIBUTING.md
rabbitmq-server/deps/amqp_client/Makefile
rabbitmq-server/deps/amqp_client/build.config [deleted file]
rabbitmq-server/deps/amqp_client/ci/test.sh [new file with mode: 0755]
rabbitmq-server/deps/amqp_client/ci/test.yml [new file with mode: 0644]
rabbitmq-server/deps/amqp_client/erlang.mk
rabbitmq-server/deps/amqp_client/include/amqp_gen_consumer_spec.hrl
rabbitmq-server/deps/amqp_client/rabbitmq-components.mk
rabbitmq-server/deps/amqp_client/src/amqp_channel_sup.erl
rabbitmq-server/deps/amqp_client/src/amqp_client.app.src
rabbitmq-server/deps/amqp_client/src/amqp_connection.erl
rabbitmq-server/deps/amqp_client/src/amqp_connection_type_sup.erl
rabbitmq-server/deps/amqp_client/src/amqp_direct_connection.erl
rabbitmq-server/deps/amqp_client/src/amqp_direct_consumer.erl
rabbitmq-server/deps/amqp_client/src/amqp_sup.erl
rabbitmq-server/deps/amqp_client/src/rabbit_ct_client_helpers.erl [new file with mode: 0644]
rabbitmq-server/deps/amqp_client/test.mk [deleted file]
rabbitmq-server/deps/amqp_client/test/Makefile [deleted file]
rabbitmq-server/deps/amqp_client/test/amqp_client_SUITE.erl [deleted file]
rabbitmq-server/deps/amqp_client/test/amqp_dbg.erl [deleted file]
rabbitmq-server/deps/amqp_client/test/negative_test_util.erl [deleted file]
rabbitmq-server/deps/amqp_client/test/system_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/amqp_client/test/test_util.erl [deleted file]
rabbitmq-server/deps/amqp_client/test/unit_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/mochiweb/.editorconfig [new file with mode: 0644]
rabbitmq-server/deps/mochiweb/CHANGES.md
rabbitmq-server/deps/mochiweb/src/mochijson2.erl
rabbitmq-server/deps/mochiweb/src/mochiweb.app.src
rabbitmq-server/deps/mochiweb/src/mochiweb_html.erl
rabbitmq-server/deps/mochiweb/test/mochiweb_html_tests.erl
rabbitmq-server/deps/rabbit_common/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/Makefile
rabbitmq-server/deps/rabbit_common/codegen.py
rabbitmq-server/deps/rabbit_common/include/old_builtin_types.hrl [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/include/rabbit.hrl
rabbitmq-server/deps/rabbit_common/include/rabbit_misc.hrl [moved from rabbitmq-server/deps/rabbitmq_event_exchange/test/src/rabbit_exchange_type_event_test_all.erl with 53% similarity]
rabbitmq-server/deps/rabbit_common/include/rabbit_msg_store.hrl
rabbitmq-server/deps/rabbit_common/mk/rabbitmq-build.mk [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/mk/rabbitmq-components.mk
rabbitmq-server/deps/rabbit_common/mk/rabbitmq-dist.mk
rabbitmq-server/deps/rabbit_common/mk/rabbitmq-plugin.mk
rabbitmq-server/deps/rabbit_common/mk/rabbitmq-run.mk
rabbitmq-server/deps/rabbit_common/mk/rabbitmq-tools.mk
rabbitmq-server/deps/rabbit_common/src/app_utils.erl
rabbitmq-server/deps/rabbit_common/src/code_version.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/src/credit_flow.erl
rabbitmq-server/deps/rabbit_common/src/gen_server2.erl
rabbitmq-server/deps/rabbit_common/src/mirrored_supervisor.erl
rabbitmq-server/deps/rabbit_common/src/pmon.erl
rabbitmq-server/deps/rabbit_common/src/priority_queue.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_amqqueue.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl [moved from rabbitmq-server/src/rabbit_auth_backend_dummy.erl with 95% similarity]
rabbitmq-server/deps/rabbit_common/src/rabbit_auth_backend_internal.erl [moved from rabbitmq-server/src/rabbit_auth_backend_internal.erl with 85% similarity]
rabbitmq-server/deps/rabbit_common/src/rabbit_auth_mechanism.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_authn_backend.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_authz_backend.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_backing_queue.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_basic.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_binary_generator.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_binary_parser.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_channel.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_channel_interceptor.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_command_assembler.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_common.app.src
rabbitmq-server/deps/rabbit_common/src/rabbit_control_misc.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_ct_broker_helpers.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/src/rabbit_ct_helpers.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/src/rabbit_error_logger_handler.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/src/rabbit_event.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_exchange_decorator.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_exchange_type.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_health_check.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/src/rabbit_heartbeat.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_misc.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_msg_store_index.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_net.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_networking.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_nodes.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_password_hashing.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_policy_validator.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_queue_collector.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_queue_decorator.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_queue_master_locator.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_reader.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_runtime_parameter.erl
rabbitmq-server/deps/rabbit_common/src/rabbit_types.erl [moved from rabbitmq-server/src/rabbit_types.erl with 99% similarity]
rabbitmq-server/deps/rabbit_common/src/rabbit_writer.erl
rabbitmq-server/deps/rabbit_common/src/rand_compat.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/src/ssl_compat.erl
rabbitmq-server/deps/rabbit_common/src/supervisor2.erl
rabbitmq-server/deps/rabbit_common/src/time_compat.erl
rabbitmq-server/deps/rabbit_common/tools/tls-certs/Makefile [new file with mode: 0644]
rabbitmq-server/deps/rabbit_common/tools/tls-certs/openssl.cnf [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_amqp1_0/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_amqp1_0/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_amqp1_0/Makefile
rabbitmq-server/deps/rabbitmq_amqp1_0/erlang.mk
rabbitmq-server/deps/rabbitmq_amqp1_0/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_binary_generator.erl
rabbitmq-server/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_binary_parser.erl
rabbitmq-server/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_link_util.erl
rabbitmq-server/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup.erl
rabbitmq-server/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_session_sup_sup.erl
rabbitmq-server/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_util.erl
rabbitmq-server/deps/rabbitmq_amqp1_0/src/rabbit_amqp1_0_writer.erl
rabbitmq-server/deps/rabbitmq_amqp1_0/src/rabbitmq_amqp1_0.app.src
rabbitmq-server/deps/rabbitmq_amqp1_0/test/lib-java/junit.jar [deleted file]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/proton/Makefile [deleted file]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/proton/build.xml [deleted file]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/proton/test/com/rabbitmq/amqp1_0/tests/proton/ProtonTests.java [deleted file]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/Makefile [deleted file]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/build.xml [deleted file]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/run-tests.sh [deleted file]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/test/com/rabbitmq/amqp1_0/tests/swiftmq/SwiftMQTests.java [deleted file]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/dotnet-tests/project.json [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/dotnet-tests/src/testsuite.cs [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/pom.xml [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/proton/FooterTest.java [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/proton/MessageAnnotationsTest.java [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/proton/RoundTripTest.java [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_amqp1_0/test/unit_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_amqp1_0/test/src/rabbit_amqp1_0_test.erl with 84% similarity]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/Makefile
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/README-tests.md
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/README.md
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/Vagrantfile [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/etc/rabbit-test.config [deleted file]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/README [deleted file]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/global.ldif
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/groups.ldif [deleted file]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/people.ldif [deleted file]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/rabbit.ldif [deleted file]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/refint_1.ldif [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/seed.sh
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/setup.sh
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/src/rabbitmq_auth_backend_ldap.app.src
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/src/rabbit_auth_backend_ldap_test.erl [deleted file]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/unit_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/src/rabbit_auth_backend_ldap_unit_test.erl with 90% similarity]
rabbitmq-server/deps/rabbitmq_auth_mechanism_ssl/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_auth_mechanism_ssl/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_auth_mechanism_ssl/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_auth_mechanism_ssl/src/rabbitmq_auth_mechanism_ssl.app.src
rabbitmq-server/deps/rabbitmq_codegen/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_codegen/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_codegen/amqp_codegen.py
rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/Makefile
rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/README.md
rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/src/rabbit_exchange_type_consistent_hash.erl
rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/src/rabbitmq_consistent_hash_exchange.app.src
rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/test/src/rabbit_exchange_type_consistent_hash_test.erl with 62% similarity]
rabbitmq-server/deps/rabbitmq_event_exchange/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_event_exchange/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_event_exchange/Makefile
rabbitmq-server/deps/rabbitmq_event_exchange/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_event_exchange/src/rabbit_exchange_type_event.erl
rabbitmq-server/deps/rabbitmq_event_exchange/src/rabbitmq_event_exchange.app.src
rabbitmq-server/deps/rabbitmq_event_exchange/test/src/rabbit_exchange_type_event_test.erl [deleted file]
rabbitmq-server/deps/rabbitmq_event_exchange/test/system_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_event_exchange/test/unit_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_event_exchange/test/src/rabbit_exchange_type_event_unit_test.erl with 89% similarity]
rabbitmq-server/deps/rabbitmq_federation/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_federation/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_federation/Makefile
rabbitmq-server/deps/rabbitmq_federation/erlang.mk
rabbitmq-server/deps/rabbitmq_federation/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_federation/src/rabbit_federation_exchange_link_sup_sup.erl
rabbitmq-server/deps/rabbitmq_federation/src/rabbit_federation_link_sup.erl
rabbitmq-server/deps/rabbitmq_federation/src/rabbit_federation_queue_link_sup_sup.erl
rabbitmq-server/deps/rabbitmq_federation/src/rabbit_federation_sup.erl
rabbitmq-server/deps/rabbitmq_federation/src/rabbit_federation_upstream.erl
rabbitmq-server/deps/rabbitmq_federation/src/rabbitmq_federation.app.src
rabbitmq-server/deps/rabbitmq_federation/test/exchange_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_federation/test/queue_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_federation/test/src/rabbit_federation_queue_test.erl with 54% similarity]
rabbitmq-server/deps/rabbitmq_federation/test/rabbit_federation_test_util.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_federation/test/src/rabbit_federation_exchange_test.erl [deleted file]
rabbitmq-server/deps/rabbitmq_federation/test/src/rabbit_federation_test_util.erl [deleted file]
rabbitmq-server/deps/rabbitmq_federation/test/unit_inbroker_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_federation/test/src/rabbit_federation_unit_test.erl with 63% similarity]
rabbitmq-server/deps/rabbitmq_federation_management/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_federation_management/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_federation_management/README.md
rabbitmq-server/deps/rabbitmq_federation_management/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_federation_management/src/rabbitmq_federation_management.app.src
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/LICENSE [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/LICENSES.txt [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/Makefile [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/README.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/build.config [moved from rabbitmq-server/deps/rabbitmq_amqp1_0/build.config with 100% similarity]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/erlang.mk [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/rabbitmq-components.mk [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/src/rabbitmq_jms_topic_exchange.app.src [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_management/Makefile
rabbitmq-server/deps/rabbitmq_management/bin/rabbitmqadmin
rabbitmq-server/deps/rabbitmq_management/erlang.mk
rabbitmq-server/deps/rabbitmq_management/etc/bunny.config [deleted file]
rabbitmq-server/deps/rabbitmq_management/etc/hare.config [deleted file]
rabbitmq-server/deps/rabbitmq_management/etc/rabbit-test.config [deleted file]
rabbitmq-server/deps/rabbitmq_management/include/rabbit_mgmt.hrl
rabbitmq-server/deps/rabbitmq_management/include/rabbit_mgmt_event_collector.hrl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/include/rabbit_mgmt_metrics.hrl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/include/rabbit_mgmt_test.hrl
rabbitmq-server/deps/rabbitmq_management/priv/www/api/index.html
rabbitmq-server/deps/rabbitmq_management/priv/www/index.html
rabbitmq-server/deps/rabbitmq_management/priv/www/js/charts.js
rabbitmq-server/deps/rabbitmq_management/priv/www/js/dispatcher.js
rabbitmq-server/deps/rabbitmq_management/priv/www/js/global.js
rabbitmq-server/deps/rabbitmq_management/priv/www/js/help.js
rabbitmq-server/deps/rabbitmq_management/priv/www/js/main.js
rabbitmq-server/deps/rabbitmq_management/priv/www/js/sammy.js [changed mode: 0755->0644]
rabbitmq-server/deps/rabbitmq_management/priv/www/js/sammy.min.js [changed mode: 0755->0644]
rabbitmq-server/deps/rabbitmq_management/priv/www/js/tmpl/channel.ejs
rabbitmq-server/deps/rabbitmq_management/priv/www/js/tmpl/connection.ejs
rabbitmq-server/deps/rabbitmq_management/priv/www/js/tmpl/connections.ejs
rabbitmq-server/deps/rabbitmq_management/priv/www/js/tmpl/node.ejs
rabbitmq-server/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs
rabbitmq-server/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs
rabbitmq-server/deps/rabbitmq_management/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_channel_stats_collector.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_cors.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_db.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_event_collector.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_event_collector_utils.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_format.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_queue_stats_collector.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_stats.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_stats_gc.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_stats_tables.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_sup.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_sup_sup.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_util.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_aliveness_test.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_binding.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_bindings.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_channel.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_channels_vhost.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_cluster_name.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_connection_channels.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_connections_vhost.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_consumers.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchange_publish.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_exchanges.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_extensions.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_node.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_nodes.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_overview.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_permission.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_user.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_permissions_vhost.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_policies.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_policy.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_actions.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_get.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_queue_purge.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_queues.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_user.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_users.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhost.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_vhosts.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_whoami.erl
rabbitmq-server/deps/rabbitmq_management/src/rabbitmq_management.app.src
rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_clustering_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_test_db_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_db.erl with 65% similarity]
rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_test_unit_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_unit.erl with 71% similarity]
rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_test_util.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management/test/src/default-config [deleted file]
rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_clustering.erl [deleted file]
rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_db_unit.erl [deleted file]
rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_http.erl [deleted file]
rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_util.erl [deleted file]
rabbitmq-server/deps/rabbitmq_management/test/src/rabbitmqadmin-test-wrapper.sh [deleted file]
rabbitmq-server/deps/rabbitmq_management/test/src/rabbitmqadmin-test.py [deleted file]
rabbitmq-server/deps/rabbitmq_management/test/src/test-config [deleted file]
rabbitmq-server/deps/rabbitmq_management_agent/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management_agent/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_management_agent/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_management_agent/src/rabbit_mgmt_db_handler.erl
rabbitmq-server/deps/rabbitmq_management_agent/src/rabbit_mgmt_external_stats.erl
rabbitmq-server/deps/rabbitmq_management_agent/src/rabbitmq_management_agent.app.src
rabbitmq-server/deps/rabbitmq_management_visualiser/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_management_visualiser/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_management_visualiser/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_management_visualiser/src/rabbitmq_management_visualiser.app.src
rabbitmq-server/deps/rabbitmq_mqtt/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_mqtt/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_mqtt/Makefile
rabbitmq-server/deps/rabbitmq_mqtt/include/rabbit_mqtt.hrl
rabbitmq-server/deps/rabbitmq_mqtt/include/rabbit_mqtt_frame.hrl
rabbitmq-server/deps/rabbitmq_mqtt/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_mqtt/src/rabbit_mqtt_connection_sup.erl
rabbitmq-server/deps/rabbitmq_mqtt/src/rabbit_mqtt_frame.erl
rabbitmq-server/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl
rabbitmq-server/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl
rabbitmq-server/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer.erl
rabbitmq-server/deps/rabbitmq_mqtt/src/rabbit_mqtt_retainer_sup.erl
rabbitmq-server/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl
rabbitmq-server/deps/rabbitmq_mqtt/src/rabbitmq_mqtt.app.src
rabbitmq-server/deps/rabbitmq_mqtt/test/Makefile
rabbitmq-server/deps/rabbitmq_mqtt/test/build.properties
rabbitmq-server/deps/rabbitmq_mqtt/test/build.xml
rabbitmq-server/deps/rabbitmq_mqtt/test/java_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_mqtt/test/processor_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_mqtt/test/rabbit-test.sh [deleted file]
rabbitmq-server/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app [moved from rabbitmq-server/deps/rabbitmq_mqtt/test/src/rabbitmq_mqtt_standalone.app.src with 100% similarity]
rabbitmq-server/deps/rabbitmq_mqtt/test/reader_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_mqtt/test/setup-rabbit-test.sh [deleted file]
rabbitmq-server/deps/rabbitmq_mqtt/test/src/com/rabbitmq/mqtt/test/MqttTest.java
rabbitmq-server/deps/rabbitmq_mqtt/test/src/com/rabbitmq/mqtt/test/tls/MqttSSLTest.java
rabbitmq-server/deps/rabbitmq_mqtt/test/src/rabbit_mqtt_processor_tests.erl [deleted file]
rabbitmq-server/deps/rabbitmq_mqtt/test/test.sh [deleted file]
rabbitmq-server/deps/rabbitmq_mqtt/test/util_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_mqtt/test/src/rabbit_mqtt_util_tests.erl with 52% similarity]
rabbitmq-server/deps/rabbitmq_recent_history_exchange/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_recent_history_exchange/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_recent_history_exchange/Makefile
rabbitmq-server/deps/rabbitmq_recent_history_exchange/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_recent_history_exchange/test/src/rabbit_exchange_type_recent_history_test_util.erl [deleted file]
rabbitmq-server/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_recent_history_exchange/test/src/rabbit_exchange_type_recent_history_test.erl with 53% similarity]
rabbitmq-server/deps/rabbitmq_sharding/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_sharding/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_sharding/Makefile
rabbitmq-server/deps/rabbitmq_sharding/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_hash_exchange_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_hash_exchange_test.erl [deleted file]
rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_test.erl [deleted file]
rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_test_all.erl [deleted file]
rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_test_util.erl [deleted file]
rabbitmq-server/deps/rabbitmq_shovel/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_shovel/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_shovel/Makefile
rabbitmq-server/deps/rabbitmq_shovel/README.md
rabbitmq-server/deps/rabbitmq_shovel/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_shovel/src/rabbit_shovel_dyn_worker_sup_sup.erl
rabbitmq-server/deps/rabbitmq_shovel/src/rabbit_shovel_worker.erl
rabbitmq-server/deps/rabbitmq_shovel/src/rabbitmq_shovel.app.src
rabbitmq-server/deps/rabbitmq_shovel/test/configuration_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_shovel/test/src/rabbit_shovel_test.erl with 71% similarity]
rabbitmq-server/deps/rabbitmq_shovel/test/dynamic_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_shovel/test/src/rabbit_shovel_test_dyn.erl with 51% similarity]
rabbitmq-server/deps/rabbitmq_shovel/test/src/rabbit_shovel_test_all.erl [deleted file]
rabbitmq-server/deps/rabbitmq_shovel_management/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_shovel_management/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_shovel_management/Makefile
rabbitmq-server/deps/rabbitmq_shovel_management/README [deleted file]
rabbitmq-server/deps/rabbitmq_shovel_management/README.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_shovel_management/erlang.mk
rabbitmq-server/deps/rabbitmq_shovel_management/etc/rabbit-test.config [deleted file]
rabbitmq-server/deps/rabbitmq_shovel_management/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_shovel_management/src/rabbitmq_shovel_management.app.src
rabbitmq-server/deps/rabbitmq_shovel_management/test/http_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_shovel_management/test/src/rabbit_shovel_mgmt_test_all.erl [deleted file]
rabbitmq-server/deps/rabbitmq_shovel_management/test/src/rabbit_shovel_mgmt_test_http.erl [deleted file]
rabbitmq-server/deps/rabbitmq_stomp/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_stomp/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_stomp/Makefile
rabbitmq-server/deps/rabbitmq_stomp/include/rabbit_stomp.hrl
rabbitmq-server/deps/rabbitmq_stomp/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_stomp/src/rabbit_stomp_client_sup.erl
rabbitmq-server/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl
rabbitmq-server/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl
rabbitmq-server/deps/rabbitmq_stomp/src/rabbitmq_stomp.app.src
rabbitmq-server/deps/rabbitmq_stomp/test/amqqueue_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/rabbit_stomp_amqqueue_test.erl with 61% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/connections_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/rabbit_stomp_test.erl with 55% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/frame_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/rabbit_stomp_test_frame.erl with 78% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/ack.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/ack.py with 99% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/base.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/base.py with 98% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/connect_options.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/connect_options.py with 95% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/destinations.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/destinations.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/errors.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/errors.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/lifecycle.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/lifecycle.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/parsing.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/parsing.py with 99% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/queue_properties.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/queue_properties.py with 95% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/redelivered.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/redelivered.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/reliability.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/reliability.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/ssl_lifecycle.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/ssl_lifecycle.py with 96% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/test.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/test.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_connect_options.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/test_connect_options.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_runner.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/test_runner.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_ssl.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/test_ssl.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/test_util.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/test_util.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/transactions.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/transactions.py with 100% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_name.py [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/x_queue_name.py with 89% similarity]
rabbitmq-server/deps/rabbitmq_stomp/test/src/rabbit_stomp_client.erl
rabbitmq-server/deps/rabbitmq_stomp/test/util_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_stomp/test/src/rabbit_stomp_test_util.erl with 81% similarity]
rabbitmq-server/deps/rabbitmq_top/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/Makefile [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/README.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/build.config [moved from rabbitmq-server/deps/rabbitmq_federation/build.config with 100% similarity]
rabbitmq-server/deps/rabbitmq_top/erlang.mk [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/priv/www/js/tmpl/ets_tables.ejs [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/priv/www/js/tmpl/process.ejs [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/priv/www/js/tmpl/processes.ejs [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/priv/www/js/top.js [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/rabbitmq-components.mk [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_app.erl [moved from rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_all.erl with 60% similarity]
rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_extension.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_sup.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_util.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_wm_process.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_wm_processes.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_worker.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_top/src/rabbitmq_top.app.src [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_tracing/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_tracing/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_tracing/Makefile
rabbitmq-server/deps/rabbitmq_tracing/README.md
rabbitmq-server/deps/rabbitmq_tracing/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_tracing/src/rabbit_tracing_consumer_sup.erl
rabbitmq-server/deps/rabbitmq_tracing/src/rabbit_tracing_sup.erl
rabbitmq-server/deps/rabbitmq_tracing/src/rabbitmq_tracing.app.src
rabbitmq-server/deps/rabbitmq_tracing/test/rabbit_tracing_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_tracing/test/src/rabbit_tracing_test.erl with 51% similarity]
rabbitmq-server/deps/rabbitmq_trust_store/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_trust_store/CONTRIBUTING.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_trust_store/Makefile [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_trust_store/README.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_trust_store/build.config [moved from rabbitmq-server/deps/rabbitmq_shovel_management/build.config with 100% similarity]
rabbitmq-server/deps/rabbitmq_trust_store/erlang.mk [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_trust_store/rabbitmq-components.mk [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_trust_store/src/rabbit_trust_store.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_trust_store/src/rabbitmq_trust_store.app.src [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_trust_store/test/system_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_web_dispatch/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_web_dispatch/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_web_dispatch/Makefile
rabbitmq-server/deps/rabbitmq_web_dispatch/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_web_dispatch/src/rabbit_webmachine_error_handler.erl
rabbitmq-server/deps/rabbitmq_web_dispatch/src/rabbitmq_web_dispatch.app.src
rabbitmq-server/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_unit_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_web_dispatch/test/src/rabbit_web_dispatch_test_unit.erl with 54% similarity]
rabbitmq-server/deps/rabbitmq_web_dispatch/test/src/rabbit_web_dispatch_test.erl [deleted file]
rabbitmq-server/deps/rabbitmq_web_stomp/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_web_stomp/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_web_stomp/Makefile
rabbitmq-server/deps/rabbitmq_web_stomp/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_web_stomp/src/rabbit_ws_client.erl
rabbitmq-server/deps/rabbitmq_web_stomp/src/rabbit_ws_client_sup.erl
rabbitmq-server/deps/rabbitmq_web_stomp/src/rabbit_ws_handler.erl
rabbitmq-server/deps/rabbitmq_web_stomp/src/rabbitmq_web_stomp.app.src
rabbitmq-server/deps/rabbitmq_web_stomp/test/cowboy_websocket_SUITE.erl [moved from rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_cowboy_websocket.erl with 51% similarity]
rabbitmq-server/deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_web_stomp/test/sockjs_websocket_SUITE.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_raw_websocket.erl [deleted file]
rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_sockjs_websocket.erl [deleted file]
rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rfc6455_client.erl
rabbitmq-server/deps/rabbitmq_web_stomp_examples/CODE_OF_CONDUCT.md [new file with mode: 0644]
rabbitmq-server/deps/rabbitmq_web_stomp_examples/CONTRIBUTING.md
rabbitmq-server/deps/rabbitmq_web_stomp_examples/priv/echo.html
rabbitmq-server/deps/rabbitmq_web_stomp_examples/rabbitmq-components.mk
rabbitmq-server/deps/rabbitmq_web_stomp_examples/src/rabbitmq_web_stomp_examples.app.src
rabbitmq-server/deps/ranch/ranch.d [new file with mode: 0644]
rabbitmq-server/deps/sockjs/src/sockjs_handler.erl
rabbitmq-server/docs/README-for-packages
rabbitmq-server/docs/rabbitmq-server.service.example [new file with mode: 0644]
rabbitmq-server/docs/rabbitmqctl.1.xml
rabbitmq-server/docs/set_rabbitmq_policy.sh.example [moved from rabbitmq-server/scripts/set_rabbitmq_policy.sh with 99% similarity]
rabbitmq-server/erlang.mk
rabbitmq-server/git-revisions.txt
rabbitmq-server/include/gm_specs.hrl
rabbitmq-server/include/rabbit_cli.hrl
rabbitmq-server/rabbitmq-components.mk
rabbitmq-server/scripts/rabbitmq-defaults [changed mode: 0644->0755]
rabbitmq-server/scripts/rabbitmq-defaults.bat
rabbitmq-server/scripts/rabbitmq-env [changed mode: 0644->0755]
rabbitmq-server/scripts/rabbitmq-env.bat
rabbitmq-server/scripts/rabbitmq-server
rabbitmq-server/scripts/rabbitmq-server-ha.ocf
rabbitmq-server/scripts/rabbitmq-server.bat
rabbitmq-server/scripts/rabbitmq-service.bat
rabbitmq-server/scripts/rabbitmqctl
rabbitmq-server/scripts/travis_test_ocf_ra.sh [new file with mode: 0644]
rabbitmq-server/src/background_gc.erl
rabbitmq-server/src/delegate.erl
rabbitmq-server/src/delegate_sup.erl
rabbitmq-server/src/dtree.erl
rabbitmq-server/src/file_handle_cache.erl
rabbitmq-server/src/file_handle_cache_stats.erl
rabbitmq-server/src/gatherer.erl
rabbitmq-server/src/gm.erl
rabbitmq-server/src/lqueue.erl
rabbitmq-server/src/mnesia_sync.erl
rabbitmq-server/src/pg_local.erl
rabbitmq-server/src/rabbit.app.src
rabbitmq-server/src/rabbit.erl
rabbitmq-server/src/rabbit_access_control.erl
rabbitmq-server/src/rabbit_alarm.erl
rabbitmq-server/src/rabbit_amqqueue_process.erl
rabbitmq-server/src/rabbit_amqqueue_sup.erl
rabbitmq-server/src/rabbit_amqqueue_sup_sup.erl
rabbitmq-server/src/rabbit_binding.erl
rabbitmq-server/src/rabbit_channel_sup.erl
rabbitmq-server/src/rabbit_channel_sup_sup.erl
rabbitmq-server/src/rabbit_cli.erl
rabbitmq-server/src/rabbit_client_sup.erl
rabbitmq-server/src/rabbit_connection_helper_sup.erl
rabbitmq-server/src/rabbit_connection_sup.erl
rabbitmq-server/src/rabbit_control_main.erl
rabbitmq-server/src/rabbit_dead_letter.erl
rabbitmq-server/src/rabbit_direct.erl
rabbitmq-server/src/rabbit_disk_monitor.erl
rabbitmq-server/src/rabbit_epmd_monitor.erl
rabbitmq-server/src/rabbit_error_logger.erl
rabbitmq-server/src/rabbit_exchange.erl
rabbitmq-server/src/rabbit_exchange_type_headers.erl
rabbitmq-server/src/rabbit_exchange_type_invalid.erl
rabbitmq-server/src/rabbit_file.erl
rabbitmq-server/src/rabbit_framing.erl
rabbitmq-server/src/rabbit_guid.erl
rabbitmq-server/src/rabbit_hipe.erl
rabbitmq-server/src/rabbit_limiter.erl
rabbitmq-server/src/rabbit_log.erl
rabbitmq-server/src/rabbit_memory_monitor.erl
rabbitmq-server/src/rabbit_mirror_queue_coordinator.erl
rabbitmq-server/src/rabbit_mirror_queue_master.erl
rabbitmq-server/src/rabbit_mirror_queue_misc.erl
rabbitmq-server/src/rabbit_mirror_queue_mode.erl
rabbitmq-server/src/rabbit_mirror_queue_mode_exactly.erl
rabbitmq-server/src/rabbit_mirror_queue_slave.erl
rabbitmq-server/src/rabbit_mirror_queue_sync.erl
rabbitmq-server/src/rabbit_mnesia.erl
rabbitmq-server/src/rabbit_mnesia_rename.erl
rabbitmq-server/src/rabbit_msg_file.erl
rabbitmq-server/src/rabbit_msg_store.erl
rabbitmq-server/src/rabbit_msg_store_gc.erl
rabbitmq-server/src/rabbit_node_monitor.erl
rabbitmq-server/src/rabbit_password.erl
rabbitmq-server/src/rabbit_plugins.erl
rabbitmq-server/src/rabbit_plugins_main.erl
rabbitmq-server/src/rabbit_policy.erl
rabbitmq-server/src/rabbit_prelaunch.erl
rabbitmq-server/src/rabbit_prequeue.erl
rabbitmq-server/src/rabbit_priority_queue.erl
rabbitmq-server/src/rabbit_queue_consumers.erl
rabbitmq-server/src/rabbit_queue_index.erl
rabbitmq-server/src/rabbit_queue_location_validator.erl
rabbitmq-server/src/rabbit_recovery_terms.erl
rabbitmq-server/src/rabbit_registry.erl
rabbitmq-server/src/rabbit_resource_monitor_misc.erl
rabbitmq-server/src/rabbit_restartable_sup.erl
rabbitmq-server/src/rabbit_router.erl
rabbitmq-server/src/rabbit_runtime_parameters.erl
rabbitmq-server/src/rabbit_ssl.erl
rabbitmq-server/src/rabbit_sup.erl
rabbitmq-server/src/rabbit_table.erl
rabbitmq-server/src/rabbit_trace.erl
rabbitmq-server/src/rabbit_upgrade.erl
rabbitmq-server/src/rabbit_upgrade_functions.erl
rabbitmq-server/src/rabbit_variable_queue.erl
rabbitmq-server/src/rabbit_version.erl
rabbitmq-server/src/rabbit_vhost.erl
rabbitmq-server/src/rabbit_vm.erl
rabbitmq-server/src/supervised_lifecycle.erl
rabbitmq-server/src/tcp_listener.erl
rabbitmq-server/src/tcp_listener_sup.erl
rabbitmq-server/src/truncate.erl
rabbitmq-server/src/vm_memory_monitor.erl
rabbitmq-server/src/worker_pool.erl
rabbitmq-server/src/worker_pool_sup.erl
rabbitmq-server/src/worker_pool_worker.erl
rabbitmq-server/test/channel_operation_timeout_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/channel_operation_timeout_test_queue.erl [new file with mode: 0644]
rabbitmq-server/test/cluster_rename_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/clustering_management_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/crashing_queues_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/dummy_event_receiver.erl [new file with mode: 0644]
rabbitmq-server/test/dummy_runtime_parameters.erl [new file with mode: 0644]
rabbitmq-server/test/dummy_supervisor2.erl [new file with mode: 0644]
rabbitmq-server/test/dynamic_ha_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/eager_sync_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/gm_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/health_check_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/inet_proxy_dist.erl [new file with mode: 0644]
rabbitmq-server/test/inet_tcp_proxy.erl [new file with mode: 0644]
rabbitmq-server/test/inet_tcp_proxy_manager.erl [new file with mode: 0644]
rabbitmq-server/test/lazy_queue_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/many_node_ha_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/mirrored_supervisor_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/mirrored_supervisor_SUITE_gs.erl [new file with mode: 0644]
rabbitmq-server/test/msg_store_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/partitions_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/priority_queue_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/priority_queue_recovery_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/queue_master_location_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/rabbit_ha_test_consumer.erl [new file with mode: 0644]
rabbitmq-server/test/rabbit_ha_test_producer.erl [new file with mode: 0644]
rabbitmq-server/test/simple_ha_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/sup_delayed_restart_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/sync_detection_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/unit_SUITE.erl [new file with mode: 0644]
rabbitmq-server/test/unit_inbroker_SUITE.erl [new file with mode: 0644]
tests/runtests.sh

index 5dd36934569640984ea0d2807f1b9369d5037be5..a97770229426d99a99575c7e9884c69262b68e8d 100644 (file)
@@ -1,3 +1,9 @@
+rabbitmq-server (3.6.5-1~u14.04+mos1) mos9.0; urgency=medium
+
+  * New upstream release.
+
+ -- Alexey Lebedeff <alebedev@mirantis.com>  Thu, 11 Aug 2016 13:45:00 +0300
+
 rabbitmq-server (3.6.1-1~u14.04+mos3) mos9.0; urgency=medium
 
   * Autodetect erlang async thread pool size.
index 6388df80c0d9bcd651841c0091faeed72be6d3a9..0f7d53fd1cab8f3d68eaec1c8e92ab52662c2ce1 100644 (file)
@@ -22,10 +22,11 @@ Homepage: http://www.rabbitmq.com/
 Package: rabbitmq-server
 Architecture: all
 Depends: adduser,
-         erlang-nox (>= 1:18.1),
-         erlang-base-hipe (>= 1:18.1),
+         erlang-nox (>= 1:18.1) | esl-erlang,
+         erlang-base-hipe (>= 1:18.1) | esl-erlang,
          logrotate,
-         ${misc:Depends}
+         socat,
+         init-system-helpers (>= 1.14)
 Description: AMQP server written in Erlang
  RabbitMQ is an implementation of AMQP, the emerging standard for high
  performance enterprise messaging. The RabbitMQ server is a robust and
diff --git a/debian/patches/erlang-thread-pool-autotune.patch b/debian/patches/erlang-thread-pool-autotune.patch
deleted file mode 100644 (file)
index 5a1214d..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-Description: Autotune erlang thread pool size
- Useful for hosts with big number of CPU cores
-Origin: upstream, https://github.com/binarin/rabbitmq-server/tree/rabbitmq-server-151
-Bug: https://github.com/binarin/rabbitmq-server/tree/rabbitmq-server-151
-Applied-Upstream: 3.6.2
----
-This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
---- a/deps/rabbit_common/src/rabbit_misc.erl
-+++ b/deps/rabbit_common/src/rabbit_misc.erl
-@@ -73,6 +73,7 @@
- -export([get_env/3]).
- -export([get_channel_operation_timeout/0]).
- -export([random/1]).
-+-export([report_default_thread_pool_size/0]).
- %% Horrible macro to use in guards
- -define(IS_BENIGN_EXIT(R),
-@@ -263,6 +264,7 @@
- -spec(get_env/3 :: (atom(), atom(), term())  -> term()).
- -spec(get_channel_operation_timeout/0 :: () -> non_neg_integer()).
- -spec(random/1 :: (non_neg_integer()) -> non_neg_integer()).
-+-spec(report_default_thread_pool_size/0 :: () -> 'ok').
- -endif.
-@@ -1160,6 +1162,24 @@ random(N) ->
-     end,
-     random:uniform(N).
-+guess_number_of_cpu_cores() ->
-+    case erlang:system_info(logical_processors_available) of
-+        unknown -> % Happens on Mac OS X.
-+            erlang:system_info(schedulers);
-+        N -> N
-+    end.
-+
-+%% Discussion of chosen values is at
-+%% https://github.com/rabbitmq/rabbitmq-server/issues/151
-+guess_default_thread_pool_size() ->
-+    PoolSize = 16 * guess_number_of_cpu_cores(),
-+    min(1024, max(64, PoolSize)).
-+
-+report_default_thread_pool_size() ->
-+    io:format("~b", [guess_default_thread_pool_size()]),
-+    erlang:halt(0),
-+    ok.
-+
- %% -------------------------------------------------------------------------
- %% Begin copypasta from gen_server2.erl
---- a/scripts/rabbitmq-defaults
-+++ b/scripts/rabbitmq-defaults
-@@ -40,6 +40,5 @@ MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbit
- ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins
- PLUGINS_DIR="${RABBITMQ_HOME}/plugins"
--IO_THREAD_POOL_SIZE=64
- CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
---- a/scripts/rabbitmq-server
-+++ b/scripts/rabbitmq-server
-@@ -117,7 +117,23 @@ fi
- # there is no other way of preventing their expansion.
- set -f
-+# Lazy initialization of thread pool size - if it wasn't set
-+# explicitly. This parameter is only needed when server is starting,
-+# so it makes no sense to do these calculations in rabbitmq-env or
-+# rabbitmq-defaults scripts.
-+ensure_thread_pool_size() {
-+    if [ -z "${RABBITMQ_IO_THREAD_POOL_SIZE}" ]; then
-+        RABBITMQ_IO_THREAD_POOL_SIZE=$(
-+            ${ERL_DIR}erl -pa "$RABBITMQ_EBIN_ROOT" \
-+                      -boot "${CLEAN_BOOT_FILE}" \
-+                      -noinput \
-+                      -s rabbit_misc report_default_thread_pool_size
-+        )
-+    fi
-+}
-+
- start_rabbitmq_server() {
-+    ensure_thread_pool_size
-     RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \
-     exec ${ERL_DIR}erl \
-     -pa /var/lib/rabbitmq/native-code \
diff --git a/debian/patches/health-check-rabbit-node-monitor-rmq-pr-915.patch b/debian/patches/health-check-rabbit-node-monitor-rmq-pr-915.patch
new file mode 100644 (file)
index 0000000..9e3954d
--- /dev/null
@@ -0,0 +1,90 @@
+--- a/deps/rabbit_common/src/rabbit_health_check.erl
++++ b/deps/rabbit_common/src/rabbit_health_check.erl
+@@ -35,7 +35,7 @@ node(Node, Timeout) ->
+     rabbit_misc:rpc_call(Node, rabbit_health_check, local, [], Timeout).
+ local() ->
+-    run_checks([list_channels, list_queues, alarms]).
++    run_checks([list_channels, list_queues, alarms, rabbit_node_monitor]).
+ %%----------------------------------------------------------------------------
+ %% Internal functions
+@@ -63,6 +63,16 @@ node_health_check(list_channels) ->
+ node_health_check(list_queues) ->
+     health_check_queues(rabbit_vhost:list());
++node_health_check(rabbit_node_monitor) ->
++    case rabbit_node_monitor:partitions() of
++        L when is_list(L) ->
++            ok;
++        Other ->
++            ErrorMsg = io_lib:format("rabbit_node_monitor:partitions/1 unexpected out: ~p",
++                                     [Other]),
++            {error_string, ErrorMsg}
++    end;
++
+ node_health_check(alarms) ->
+     case proplists:get_value(alarms, rabbit:status()) of
+         [] ->
+--- a/src/rabbit_autoheal.erl
++++ b/src/rabbit_autoheal.erl
+@@ -297,6 +297,17 @@ winner_finish(Notify) ->
+     send(leader(), {autoheal_finished, node()}),
+     not_healing.
++%% XXX This can enter infinite loop, if mnesia was somehow restarted
++%% outside of our control - i.e. somebody started app back by hand or
++%% completely restarted node. One possible solution would be something
++%% like this (but it needs some more pondering and is left for some
++%% other patch):
++%% - monitor top-level mnesia supervisors of all losers
++%% - notify losers about the fact that they are indeed losers
++%% - wait for all monitors to go 'DOWN' (+ maybe some timeout on the whole process)
++%% - do one round of parallel rpc calls to check whether mnesia is still stopped on all
++%%   losers
++%% - If everything is still stopped, continue autoheal process. Or cancel it otherwise.
+ wait_for_mnesia_shutdown([Node | Rest] = AllNodes) ->
+     case rpc:call(Node, mnesia, system_info, [is_running]) of
+         no ->
+--- a/test/health_check_SUITE.erl
++++ b/test/health_check_SUITE.erl
+@@ -33,6 +33,8 @@
+         ,ignores_remote_alarms/1
+         ,detects_local_alarm/1
+         ,honors_timeout_argument/1
++        ,detects_stuck_local_node_monitor/1
++        ,ignores_stuck_remote_node_monitor/1
+         ]).
+ all() ->
+@@ -47,6 +49,8 @@ groups() ->
+       ,ignores_remote_alarms
+       ,detects_local_alarm
+       ,honors_timeout_argument
++      ,detects_stuck_local_node_monitor
++      ,ignores_stuck_remote_node_monitor
+       ]}].
+ init_per_suite(Config) ->
+@@ -123,6 +127,21 @@ detects_local_alarm(Config) ->
+     {match, _} = re:run(Str, "resource alarm.*in effect"),
+     ok.
++detects_stuck_local_node_monitor(Config) ->
++    [A|_] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
++    rabbit_ct_broker_helpers:rpc(Config, A, sys, suspend, [rabbit_node_monitor]),
++    {error, 75, Str} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A, ["-t", "5", "node_health_check"]),
++    {match, _} = re:run(Str, "operation node_health_check.*timed out"),
++    resume_sys_process(Config, A, rabbit_node_monitor),
++    ok.
++
++ignores_stuck_remote_node_monitor(Config) ->
++    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
++    rabbit_ct_broker_helpers:rpc(Config, A, sys, suspend, [rabbit_node_monitor]),
++    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, B, ["-t", "5", "node_health_check"]),
++    resume_sys_process(Config, A, rabbit_node_monitor),
++    ok.
++
+ honors_timeout_argument(Config) ->
+     [A|_] = open_channel_and_declare_queue_everywhere(Config),
+     QPid = suspend_single_queue(Config, A),
diff --git a/debian/patches/list-only-local-queues-rmq-pr-911.patch b/debian/patches/list-only-local-queues-rmq-pr-911.patch
new file mode 100644 (file)
index 0000000..45e48d2
--- /dev/null
@@ -0,0 +1,330 @@
+diff -ur rabbitmq-server.orig/deps/rabbit_common/src/rabbit_amqqueue.erl rabbitmq-server/deps/rabbit_common/src/rabbit_amqqueue.erl
+--- rabbitmq-server.orig/deps/rabbit_common/src/rabbit_amqqueue.erl    2016-08-11 16:00:38.352019183 +0300
++++ rabbitmq-server/deps/rabbit_common/src/rabbit_amqqueue.erl 2016-08-12 13:30:11.356483617 +0300
+@@ -25,7 +25,7 @@
+          check_exclusive_access/2, with_exclusive_access_or_die/3,
+          stat/1, deliver/2, requeue/3, ack/3, reject/4]).
+ -export([list/0, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2,
+-         info_all/6, info_local/1]).
++         info_all/5, info_local/1]).
+ -export([list_down/1]).
+ -export([force_event_refresh/1, notify_policy_changed/1]).
+ -export([consumers/1, consumers_all/1,  consumers_all/3, consumer_info_keys/0]).
+@@ -117,8 +117,9 @@
+ -spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+ -spec info_all(rabbit_types:vhost(), rabbit_types:info_keys()) ->
+           [rabbit_types:infos()].
++-type info_all_filter() :: 'all' | 'online' | 'offline' | 'local'.
+ -spec info_all
+-        (rabbit_types:vhost(), rabbit_types:info_keys(), boolean(), boolean(),
++        (rabbit_types:vhost(), rabbit_types:info_keys(), info_all_filter(),
+          reference(), pid()) ->
+             'ok'.
+ -spec force_event_refresh(reference()) -> 'ok'.
+@@ -627,15 +628,28 @@
+     map(list(VHostPath), fun (Q) -> info(Q, Items) end) ++
+         map(list_down(VHostPath), fun (Q) -> info_down(Q, Items, down) end).
+-info_all(VHostPath, Items, NeedOnline, NeedOffline, Ref, AggregatorPid) ->
+-    NeedOnline andalso rabbit_control_misc:emitting_map_with_exit_handler(
+-                         AggregatorPid, Ref, fun(Q) -> info(Q, Items) end, list(VHostPath),
+-                         continue),
+-    NeedOffline andalso rabbit_control_misc:emitting_map_with_exit_handler(
+-                          AggregatorPid, Ref, fun(Q) -> info_down(Q, Items, down) end,
+-                          list_down(VHostPath),
+-                          continue),
+-    %% Previous maps are incomplete, finalize emission
++info_all_partial_emit(VHostPath, Items, all, Ref, AggregatorPid) ->
++    info_all_partial_emit(VHostPath, Items, online, Ref, AggregatorPid),
++    info_all_partial_emit(VHostPath, Items, offline, Ref, AggregatorPid);
++info_all_partial_emit(VHostPath, Items, online, Ref, AggregatorPid) ->
++    rabbit_control_misc:emitting_map_with_exit_handler(
++      AggregatorPid, Ref, fun(Q) -> info(Q, Items) end,
++      list(VHostPath),
++      continue);
++info_all_partial_emit(VHostPath, Items, offline, Ref, AggregatorPid) ->
++    rabbit_control_misc:emitting_map_with_exit_handler(
++      AggregatorPid, Ref, fun(Q) -> info_down(Q, Items, down) end,
++      list_down(VHostPath),
++      continue);
++info_all_partial_emit(VHostPath, Items, local, Ref, AggregatorPid) ->
++    rabbit_control_misc:emitting_map_with_exit_handler(
++      AggregatorPid, Ref, fun(Q) -> info(Q, Items) end,
++      list_local(VHostPath),
++      continue).
++
++info_all(VHostPath, Items, Filter, Ref, AggregatorPid) ->
++    info_all_partial_emit(VHostPath, Items, Filter, Ref, AggregatorPid),
++    %% Previous map(s) are incomplete, finalize emission
+     rabbit_control_misc:emitting_map(AggregatorPid, Ref, fun(_) -> no_op end, []).
+ info_local(VHostPath) ->
+diff -ur rabbitmq-server.orig/docs/rabbitmqctl.1.xml rabbitmq-server/docs/rabbitmqctl.1.xml
+--- rabbitmq-server.orig/docs/rabbitmqctl.1.xml        2016-08-11 16:00:38.387018772 +0300
++++ rabbitmq-server/docs/rabbitmqctl.1.xml     2016-08-12 13:29:18.480081969 +0300
+@@ -749,9 +749,7 @@
+         <varlistentry>
+           <term>
+-          <cmdsynopsis>
+-            <command>authenticate_user</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>password</replaceable></arg>
+-          </cmdsynopsis>
++          <cmdsynopsis><command>authenticate_user</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>password</replaceable></arg></cmdsynopsis>
+           </term>
+           <listitem>
+             <variablelist>
+@@ -1237,7 +1235,9 @@
+       <variablelist>
+         <varlistentry role="usage-has-option-list">
+-          <term><cmdsynopsis><command>list_queues</command> <arg choice="opt">-p <replaceable>vhost</replaceable></arg> <arg choice="opt" role="usage-option-list"><replaceable>queueinfoitem</replaceable> ...</arg></cmdsynopsis></term>
++          <term>
++            <cmdsynopsis><command>list_queues</command> <arg choice="opt">-p <replaceable>vhost</replaceable></arg> <group choice="opt"><arg>--offline</arg><arg>--online</arg><arg>--local</arg></group> <arg choice="opt" role="usage-option-list"><replaceable>queueinfoitem</replaceable> ...</arg></cmdsynopsis>
++          </term>
+           <listitem>
+             <para>
+               Returns queue details. Queue details of the <command>/</command> virtual host
+@@ -1245,6 +1245,41 @@
+               override this default.
+             </para>
+             <para>
++              Displayed queues can be filtered by their status or
++              location using one of the following mutually exclusive
++              options:
++            </para>
++            <variablelist>
++
++              <varlistentry>
++                <term><cmdsynopsis><arg choice="opt">--offline</arg></cmdsynopsis></term>
++                <listitem>
++                  <para>
++                    List only those durable queues that are not
++                    currently running - i.e. they are located on
++                    inaccessible nodes.
++                  </para>
++                </listitem>
++              </varlistentry>
++              <varlistentry>
++                <term><cmdsynopsis><arg choice="opt">--online</arg></cmdsynopsis></term>
++                <listitem>
++                  <para>
++                    List queues that are currently live.
++                  </para>
++                </listitem>
++              </varlistentry>
++              <varlistentry>
++                <term><cmdsynopsis><arg choice="opt">--local</arg></cmdsynopsis></term>
++                <listitem>
++                  <para>
++                    List only those queues whose master process is
++                    located on the current node.
++                  </para>
++                </listitem>
++              </varlistentry>
++            </variablelist>
++            <para>
+               The <command>queueinfoitem</command> parameter is used to indicate which queue
+               information items to include in the results. The column order in the
+               results will match the order of the parameters.
+diff -ur rabbitmq-server.orig/docs/usage.xsl rabbitmq-server/docs/usage.xsl
+--- rabbitmq-server.orig/docs/usage.xsl        2016-03-31 17:21:29.000000000 +0300
++++ rabbitmq-server/docs/usage.xsl     2016-08-12 13:29:18.480081969 +0300
+@@ -8,7 +8,7 @@
+               encoding="UTF-8"
+               indent="no"/>
+ <xsl:strip-space elements="*"/>
+-<xsl:preserve-space elements="cmdsynopsis arg" />
++<xsl:preserve-space elements="cmdsynopsis arg group" />
+ <xsl:template match="/">
+ <!-- Pull out cmdsynopsis to show the command usage line. -->%% Generated, do not edit!
+@@ -68,6 +68,14 @@
+ <!-- Don't show anything else in command usage -->
+ <xsl:template match="text()" mode="command-usage"/>
++<xsl:template match="group[@choice='opt']">
++  <xsl:text>[</xsl:text>
++  <xsl:for-each select="arg">
++    <xsl:apply-templates/>
++    <xsl:if test="not(position() = last())"><xsl:text>|</xsl:text></xsl:if>
++  </xsl:for-each>
++  <xsl:text>]</xsl:text>
++</xsl:template>
+ <xsl:template match="arg[@choice='opt']">[<xsl:apply-templates/>]</xsl:template>
+ <xsl:template match="replaceable">&lt;<xsl:value-of select="."/>&gt;</xsl:template>
+diff -ur rabbitmq-server.orig/include/rabbit_cli.hrl rabbitmq-server/include/rabbit_cli.hrl
+--- rabbitmq-server.orig/include/rabbit_cli.hrl        2016-08-11 16:00:38.388018760 +0300
++++ rabbitmq-server/include/rabbit_cli.hrl     2016-08-12 13:29:18.480081969 +0300
+@@ -29,6 +29,7 @@
+ -define(RAM_OPT, "--ram").
+ -define(OFFLINE_OPT, "--offline").
+ -define(ONLINE_OPT, "--online").
++-define(LOCAL_OPT, "--local").
+ -define(NODE_DEF(Node), {?NODE_OPT, {option, Node}}).
+@@ -46,6 +47,7 @@
+ -define(RAM_DEF, {?RAM_OPT, flag}).
+ -define(OFFLINE_DEF, {?OFFLINE_OPT, flag}).
+ -define(ONLINE_DEF, {?ONLINE_OPT, flag}).
++-define(LOCAL_DEF, {?LOCAL_OPT, flag}).
+ %% Subset of standartized exit codes from sysexits.h, see
+ %% https://github.com/rabbitmq/rabbitmq-server/issues/396 for discussion.
+diff -ur rabbitmq-server.orig/src/rabbit_cli.erl rabbitmq-server/src/rabbit_cli.erl
+--- rabbitmq-server.orig/src/rabbit_cli.erl    2016-08-11 16:00:38.392018713 +0300
++++ rabbitmq-server/src/rabbit_cli.erl 2016-08-12 13:29:18.480081969 +0300
+@@ -18,7 +18,7 @@
+ -include("rabbit_cli.hrl").
+ -export([main/3, start_distribution/0, start_distribution/1,
+-         parse_arguments/4, filter_opts/2,
++         parse_arguments/4, mutually_exclusive_flags/3,
+          rpc_call/4, rpc_call/5, rpc_call/7]).
+ %%----------------------------------------------------------------------------
+@@ -42,8 +42,7 @@
+          [{string(), optdef()}], string(), [string()]) ->
+           parse_result().
+--spec filter_opts([{option_name(), option_value()}], [option_name()]) ->
+-          [boolean()].
++-spec mutually_exclusive_flags([{option_name(), option_value()}], term(), [{option_name(), term()}]) -> {ok, term()} | {error, string()}.
+ -spec rpc_call(node(), atom(), atom(), [any()]) -> any().
+ -spec rpc_call(node(), atom(), atom(), [any()], number()) -> any().
+@@ -250,20 +249,22 @@
+         {none, _, _}     -> no_command
+     end.
+-%% When we have a set of flags that are used for filtering, we want by
+-%% default to include every such option in our output. But if a user
+-%% explicitly specified any such flag, we want to include only items
+-%% which he has requested.
+-filter_opts(CurrentOptionValues, AllOptionNames) ->
+-    Explicit = lists:map(fun(OptName) ->
+-                                 proplists:get_bool(OptName, CurrentOptionValues)
+-                         end,
+-                         AllOptionNames),
+-    case lists:member(true, Explicit) of
+-        true ->
+-            Explicit;
+-        false ->
+-            lists:duplicate(length(AllOptionNames), true)
++mutually_exclusive_flags(CurrentOptionValues, Default, FlagsAndValues) ->
++    PresentFlags = lists:filtermap(fun({OptName, _} = _O) ->
++                                           proplists:get_bool(OptName, CurrentOptionValues)
++                                   end,
++                             FlagsAndValues),
++    case PresentFlags of
++        [] ->
++            {ok, Default};
++        [{_, Value}] ->
++            {ok, Value};
++        _ ->
++            Names = [ [$', N, $']  || {N, _} <- PresentFlags ],
++            CommaSeparated = string:join(lists:droplast(Names), ", "),
++            AndOneMore = lists:last(Names),
++            Msg = io_lib:format("Options ~s and ~s are mutually exclusive", [CommaSeparated, AndOneMore]),
++            {error, lists:flatten(Msg)}
+     end.
+ %%----------------------------------------------------------------------------
+diff -ur rabbitmq-server.orig/src/rabbit_control_main.erl rabbitmq-server/src/rabbit_control_main.erl
+--- rabbitmq-server.orig/src/rabbit_control_main.erl   2016-08-11 16:00:38.392018713 +0300
++++ rabbitmq-server/src/rabbit_control_main.erl        2016-08-12 13:29:18.481081958 +0300
+@@ -74,7 +74,7 @@
+          {clear_policy, [?VHOST_DEF]},
+          {list_policies, [?VHOST_DEF]},
+-         {list_queues, [?VHOST_DEF, ?OFFLINE_DEF, ?ONLINE_DEF]},
++         {list_queues, [?VHOST_DEF, ?OFFLINE_DEF, ?ONLINE_DEF, ?LOCAL_DEF]},
+          {list_exchanges, [?VHOST_DEF]},
+          {list_bindings, [?VHOST_DEF]},
+          {list_connections, [?VHOST_DEF]},
+@@ -632,12 +632,19 @@
+          true);
+ action(list_queues, Node, Args, Opts, Inform, Timeout) ->
+-    [Online, Offline] = rabbit_cli:filter_opts(Opts, [?ONLINE_OPT, ?OFFLINE_OPT]),
+-    Inform("Listing queues", []),
+-    VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+-    ArgAtoms = default_if_empty(Args, [name, messages]),
+-    call(Node, {rabbit_amqqueue, info_all, [VHostArg, ArgAtoms, Online, Offline]},
+-         ArgAtoms, Timeout);
++    case rabbit_cli:mutually_exclusive_flags(
++           Opts, all, [{?ONLINE_OPT, online}
++                      ,{?OFFLINE_OPT, offline}
++                      ,{?LOCAL_OPT, local}]) of
++        {ok, Filter} ->
++            Inform("Listing queues", []),
++            VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
++            ArgAtoms = default_if_empty(Args, [name, messages]),
++            call(Node, {rabbit_amqqueue, info_all, [VHostArg, ArgAtoms, Filter]},
++                 ArgAtoms, Timeout);
++        {error, ErrStr} ->
++            {error_string, ErrStr}
++    end;
+ action(list_exchanges, Node, Args, Opts, Inform, Timeout) ->
+     Inform("Listing exchanges", []),
+Only in rabbitmq-server/test: rabbitmqctl_integration_SUITE.erl
+diff -ur rabbitmq-server.orig/test/unit_SUITE.erl rabbitmq-server/test/unit_SUITE.erl
+--- rabbitmq-server.orig/test/unit_SUITE.erl   2016-08-11 16:00:38.402018595 +0300
++++ rabbitmq-server/test/unit_SUITE.erl        2016-08-12 13:29:18.481081958 +0300
+@@ -31,7 +31,7 @@
+     [
+       {parallel_tests, [parallel], [
+           arguments_parser,
+-          filtering_flags_parsing,
++          mutually_exclusive_flags_parsing,
+           {basic_header_handling, [parallel], [
+               write_table_with_invalid_existing_type,
+               invalid_existing_headers,
+@@ -135,26 +135,32 @@
+     true = SortRes(ExpRes) =:= SortRes(Fun(As)).
+-filtering_flags_parsing(_Config) ->
+-    Cases = [{[], [], []}
+-            ,{[{"--online", true}], ["--offline", "--online", "--third-option"], [false, true, false]}
+-            ,{[{"--online", true}, {"--third-option", true}, {"--offline", true}], ["--offline", "--online", "--third-option"], [true, true, true]}
+-            ,{[], ["--offline", "--online", "--third-option"], [true, true, true]}
+-            ],
+-    lists:foreach(fun({Vals, Opts, Expect}) ->
+-                          case rabbit_cli:filter_opts(Vals, Opts) of
+-                              Expect ->
++mutually_exclusive_flags_parsing(_Config) ->
++    Spec = [{"--online", online}
++           ,{"--offline", offline}
++           ,{"--local", local}],
++    Default = all,
++    Cases =[{["--online"], {ok, online}}
++           ,{[], {ok, Default}}
++           ,{["--offline"], {ok, offline}}
++           ,{["--local"], {ok, local}}
++           ,{["--offline", "--local"], {error, "Options '--offline' and '--local' are mutually exclusive"}}
++           ,{["--offline", "--online"], {error, "Options '--online' and '--offline' are mutually exclusive"}}
++           ,{["--offline", "--local", "--online"], {error, "Options '--online', '--offline' and '--local' are mutually exclusive"}}
++           ],
++    lists:foreach(fun({Opts, Expected}) ->
++                          ExpandedOpts = [ {Opt, true} || Opt <- Opts ],
++                          case rabbit_cli:mutually_exclusive_flags(ExpandedOpts, all, Spec) of
++                              Expected ->
+                                   ok;
+                               Got ->
+-                                  exit({no_match, Got, Expect, {args, Vals, Opts}})
++                                  exit({no_match, Got, Expected, {opts, Opts}})
+                           end
+-                  end,
+-                  Cases).
++                  end, Cases).
+ %% -------------------------------------------------------------------
+ %% basic_header_handling.
+ %% -------------------------------------------------------------------
+-
+ -define(XDEATH_TABLE,
+         [{<<"reason">>,       longstr,   <<"blah">>},
+          {<<"queue">>,        longstr,   <<"foo.bar.baz">>},
index 404474979bd1262880c49e4a96dd324784c528a9..131caae53dfe5d9f68b781d4ecb4c70f13112666 100644 (file)
@@ -2,11 +2,11 @@
 diff -r -u rabbitmq-server-3.6.1/scripts/rabbitmq-server rabbitmq-server/scripts/rabbitmq-server
 --- rabbitmq-server-3.6.1/scripts/rabbitmq-server      2016-02-09 15:55:29.000000000 +0300
 +++ rabbitmq-server/scripts/rabbitmq-server    2016-04-04 20:18:01.000000000 +0300
-@@ -115,6 +115,7 @@
- start_rabbitmq_server() {
+@@ -141,6 +141,7 @@
+     check_start_params &&
      RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \
      exec ${ERL_DIR}erl \
-+    -pa /var/lib/rabbitmq/native-code \
-         -pa ${RABBITMQ_EBIN_ROOT} \
++        -pa /var/lib/rabbitmq/native-code \
+         -pa ${RABBITMQ_SERVER_CODE_PATH} ${RABBITMQ_EBIN_ROOT} \
          ${RABBITMQ_START_RABBIT} \
          ${RABBITMQ_NAME_TYPE} ${RABBITMQ_NODENAME} \
diff --git a/debian/patches/rabbitmq-probe-ephemeral-port.patch b/debian/patches/rabbitmq-probe-ephemeral-port.patch
deleted file mode 100644 (file)
index 4c031a9..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-diff -ru rabbitmq-server-3.6.0.orig/deps/rabbit_common/src/rabbit_networking.erl rabbitmq-server-3.6.0/deps/rabbit_common/src/rabbit_networking.erl
---- rabbitmq-server-3.6.0.orig/deps/rabbit_common/src/rabbit_networking.erl    2015-12-14 13:41:01.000000000 +0300
-+++ rabbitmq-server-3.6.0/deps/rabbit_common/src/rabbit_networking.erl 2016-03-01 14:51:14.203046564 +0300
-@@ -50,7 +50,7 @@
- -include("rabbit.hrl").
- -include_lib("kernel/include/inet.hrl").
---define(FIRST_TEST_BIND_PORT, 10000).
-+-define(FIRST_TEST_BIND_PORT, 32768).
- %% POODLE
- -define(BAD_SSL_PROTOCOL_VERSIONS, [sslv3]).
-Только Ð² rabbitmq-server-3.6.0/deps/rabbit_common/src: rabbit_networking.erl.orig
index fd33cc5af7ba0250f0c0e262b45f6f5870bc27bb..2154bb57c17bed46d86b54fe2d18e9bc578d0365 100644 (file)
@@ -1,4 +1,3 @@
-rabbitmq-probe-ephemeral-port.patch
 native-code-path.patch
-zero-deps-systemd-1.patch
-erlang-thread-pool-autotune.patch
+list-only-local-queues-rmq-pr-911.patch
+health-check-rabbit-node-monitor-rmq-pr-915.patch
diff --git a/debian/patches/zero-deps-systemd-1.patch b/debian/patches/zero-deps-systemd-1.patch
deleted file mode 100644 (file)
index 8a88842..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-Только Ð² rabbitmq-server-3.6.1/docs: rabbitmq-server.service.example
---- a/scripts/rabbitmq-server
-+++ b/scripts/rabbitmq-server
-@@ -47,7 +47,7 @@ case "$(uname -s)" in
-                    exit $EX_CANTCREAT
-                fi
-                if ! echo $$ > ${RABBITMQ_PID_FILE}; then
--                   # Bettern diagnostics - otherwise the only report in logs is about failed 'echo'
-+                   # Better diagnostics - otherwise the only report in logs is about failed 'echo'
-                    # command, but without any other details: neither what script has failed nor what
-                    # file output was redirected to.
-                    echo "Failed to write pid file: ${RABBITMQ_PID_FILE}"
-@@ -58,8 +58,13 @@ esac
- RABBITMQ_EBIN_ROOT="${RABBITMQ_HOME}/ebin"
-+[ "$NOTIFY_SOCKET" ] && RUNNING_UNDER_SYSTEMD=true
-+
- set +e
-+# NOTIFY_SOCKET is needed here to prevent epmd from impersonating the
-+# success of our startup sequence to systemd.
-+NOTIFY_SOCKET= \
- RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \
- RABBITMQ_DIST_PORT=$RABBITMQ_DIST_PORT \
-     ${ERL_DIR}erl -pa "$RABBITMQ_EBIN_ROOT" \
-@@ -152,7 +157,20 @@ stop_rabbitmq_server() {
-     fi
- }
--if [ 'x' = "x$RABBITMQ_ALLOW_INPUT" -a -z "$detached" ]; then
-+if [ "$RABBITMQ_ALLOW_INPUT" -o "$RUNNING_UNDER_SYSTEMD" -o "$detached" ]; then
-+    # Run erlang VM directly, completely replacing current shell
-+    # process - so the pid file written in the code above will be
-+    # valid (unless detached, which is also handled in the code
-+    # above).
-+    #
-+    # And also this is the correct mode to run the broker under
-+    # systemd - there is no need in a proxy process that converts
-+    # signals to graceful shutdown command, the unit file should already
-+    # contain instructions for graceful shutdown. Also by removing
-+    # this additional process we could simply use value returned by
-+    # `os:getpid/0` for a systemd ready notification.
-+    start_rabbitmq_server "$@"
-+else
-     # When RabbitMQ runs in the foreground but the Erlang shell is
-     # disabled, we setup signal handlers to stop RabbitMQ properly. This
-     # is at least useful in the case of Docker.
-@@ -161,7 +179,7 @@ if [ 'x' = "x$RABBITMQ_ALLOW_INPUT" -a -
-     RABBITMQ_SERVER_START_ARGS="${RABBITMQ_SERVER_START_ARGS} +B i"
-     # Signal handlers. They all stop RabbitMQ properly (using
--    # rabbitmqctl stop). Depending on the signal, this script will exwit
-+    # rabbitmqctl stop). Depending on the signal, this script will exit
-     # with a non-zero error code:
-     #   SIGHUP SIGTERM SIGTSTP
-     #     They are considered a normal process termination, so the script
-@@ -177,6 +195,4 @@ if [ 'x' = "x$RABBITMQ_ALLOW_INPUT" -a -
-     # Block until RabbitMQ exits or a signal is caught.
-     # Waits for last command (which is start_rabbitmq_server)
-     wait $!
--else
--    start_rabbitmq_server "$@"
- fi
---- a/src/rabbit.erl
-+++ b/src/rabbit.erl
-@@ -284,16 +284,120 @@ broker_start() ->
-     Plugins = rabbit_plugins:setup(),
-     ToBeLoaded = Plugins ++ ?APPS,
-     start_apps(ToBeLoaded),
--    case os:type() of
--        {win32, _} -> ok;
--        _ -> case code:load_file(sd_notify) of
--                 {module, sd_notify} -> SDNotify = sd_notify,
--                                        SDNotify:sd_notify(0, "READY=1");
--                 {error, _} -> os:cmd("systemd-notify --ready")
--             end
--    end,
-+    maybe_sd_notify(),
-     ok = log_broker_started(rabbit_plugins:active()).
-+%% Try to send systemd ready notification if it makes sense in the
-+%% current environment. standard_error is used intentionally in all
-+%% logging statements, so all this messages will end in systemd
-+%% journal.
-+maybe_sd_notify() ->
-+    case sd_notify_ready() of
-+        false ->
-+            io:format(standard_error, "systemd READY notification failed, beware of timeouts~n", []);
-+        _ ->
-+            ok
-+    end.
-+
-+sd_notify_ready() ->
-+    case {os:type(), os:getenv("NOTIFY_SOCKET")} of
-+        {{win32, _}, _} ->
-+            true;
-+        {_, [_|_]} -> %% Non-empty NOTIFY_SOCKET, give it a try
-+            sd_notify_legacy() orelse sd_notify_socat();
-+        _ ->
-+            true
-+    end.
-+
-+sd_notify_data() ->
-+    "READY=1\nSTATUS=Initialized\nMAINPID=" ++ os:getpid() ++ "\n".
-+
-+sd_notify_legacy() ->
-+    case code:load_file(sd_notify) of
-+        {module, sd_notify} ->
-+            SDNotify = sd_notify,
-+            SDNotify:sd_notify(0, sd_notify_data()),
-+            true;
-+        {error, _} ->
-+            false
-+    end.
-+
-+%% socat(1) is the most portable way the sd_notify could be
-+%% implemented in erlang, without introducing some NIF. Currently the
-+%% following issues prevent us from implementing it in a more
-+%% reasonable way:
-+%% - systemd-notify(1) is unstable for non-root users
-+%% - erlang doesn't support unix domain sockets.
-+%%
-+%% Some details on how we ended with such a solution:
-+%%   https://github.com/rabbitmq/rabbitmq-server/issues/664
-+sd_notify_socat() ->
-+    case sd_current_unit() of
-+        {ok, Unit} ->
-+            io:format(standard_error, "systemd unit for activation check: \"~s\"~n", [Unit]),
-+            sd_notify_socat(Unit);
-+        _ ->
-+            false
-+    end.
-+
-+socat_socket_arg("@" ++ AbstractUnixSocket) ->
-+    "abstract-sendto:" ++ AbstractUnixSocket;
-+socat_socket_arg(UnixSocket) ->
-+    "unix-sendto:" ++ UnixSocket.
-+
-+sd_open_port() ->
-+    open_port(
-+      {spawn_executable, os:find_executable("socat")},
-+      [{args, [socat_socket_arg(os:getenv("NOTIFY_SOCKET")), "STDIO"]},
-+       use_stdio, out]).
-+
-+sd_notify_socat(Unit) ->
-+    case sd_open_port() of
-+        {'EXIT', Exit} ->
-+            io:format(standard_error, "Failed to start socat ~p~n", [Exit]),
-+            false;
-+        Port ->
-+            Port ! {self(), {command, sd_notify_data()}},
-+            Result = sd_wait_activation(Port, Unit),
-+            port_close(Port),
-+            Result
-+    end.
-+
-+sd_current_unit() ->
-+    case catch re:run(os:cmd("systemctl status " ++ os:getpid()), "([-.@0-9a-zA-Z]+)", [unicode, {capture, all_but_first, list}]) of
-+        {'EXIT', _} ->
-+            error;
-+        {match, [Unit]} ->
-+            {ok, Unit};
-+        _ ->
-+            error
-+    end.
-+
-+sd_wait_activation(Port, Unit) ->
-+    case os:find_executable("systemctl") of
-+        false ->
-+            io:format(standard_error, "'systemctl' unavailable, falling back to sleep~n", []),
-+            timer:sleep(5000),
-+            true;
-+        _ ->
-+            sd_wait_activation(Port, Unit, 10)
-+    end.
-+
-+sd_wait_activation(_, _, 0) ->
-+    io:format(standard_error, "Service still in 'activating' state, bailing out~n", []),
-+    false;
-+sd_wait_activation(Port, Unit, AttemptsLeft) ->
-+    case os:cmd("systemctl show --property=ActiveState " ++ Unit) of
-+        "ActiveState=activating\n" ->
-+            timer:sleep(1000),
-+            sd_wait_activation(Port, Unit, AttemptsLeft - 1);
-+        "ActiveState=" ++ _ ->
-+            true;
-+        _ = Err->
-+            io:format(standard_error, "Unexpected status from systemd ~p~n", [Err]),
-+            false
-+    end.
-+
- start_it(StartFun) ->
-     Marker = spawn_link(fun() -> receive stop -> ok end end),
-     case catch register(rabbit_boot, Marker) of
-@@ -332,6 +436,10 @@ stop_and_halt() ->
-         stop()
-     after
-         rabbit_log:info("Halting Erlang VM~n", []),
-+        %% Also duplicate this information to stderr, so console where
-+        %% foreground broker was running (or systemd journal) will
-+        %% contain information about graceful termination.
-+        io:format(standard_error, "Gracefully halting Erlang VM~n", []),
-         init:stop()
-     end,
-     ok.
-@@ -693,7 +801,8 @@ print_banner() ->
-               "~n  ##########  Logs: ~s"
-               "~n  ######  ##        ~s"
-               "~n  ##########"
--              "~n              Starting broker...",
-+              "~n              Starting broker..."
-+              "~n",
-               [Product, Version, ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE,
-                log_location(kernel), log_location(sasl)]).
old mode 100644 (file)
new mode 100755 (executable)
index 5927e7a891bd603a3b6db92f9e5298f36e67efb0..5fdba494ac9f7842edbb56af8539f5f372bbd9aa 100755 (executable)
@@ -32,84 +32,21 @@ fi
 
 chown -R rabbitmq:rabbitmq /var/lib/rabbitmq
 chown -R rabbitmq:rabbitmq /var/log/rabbitmq
+chmod 750 /var/lib/rabbitmq/mnesia
+chmod -R o-rwx,g-w /var/lib/rabbitmq/mnesia
 
 HIPE_DIRECTORY=/var/lib/rabbitmq/native-code
 
-ensure_erlang_cookie () {
-    HOME=/root/ erl -noinput -sname root-cookie-ensure-$$ -s init stop
-    echo "$(cat $HOME/.erlang.cookie)"
-}
-
-ensure_hipe_starter () {
-    local cookie="$1"
-    HOME=/root \
-    RABBITMQ_NODENAME=rabbit-hipe-compiler@localhost \
-    RABBITMQ_NODE_ONLY=true \
-    RABBITMQ_NODE_PORT=65000 \
-    RABBITMQ_DIST_PORT=65001 \
-    RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="-cookie \"$cookie\"" \
-    /usr/lib/rabbitmq/bin/rabbitmq-server -detached
-}
-
-hipe_starter_eval () {
-    local cookie="$1"
-    local code="$2"
-    HOME=/root \
-    RABBITMQ_NODENAME=rabbit-hipe-compiler@localhost \
-    RABBITMQ_CTL_ERL_ARGS="-cookie \"$cookie\"" \
-    /usr/lib/rabbitmq/bin/rabbitmqctl eval "$code"
-}
-
-hipe_compile () {
-    local retry_no
-    local cookie
-
-    rm -rf ${HIPE_DIRECTORY}
-    mkdir -p ${HIPE_DIRECTORY}
-
-    pkill -f rabbit-hipe-compiler || true
-
-    cookie="$(ensure_erlang_cookie)"
-    ensure_hipe_starter "$cookie"
-
-    for retry_no in $(seq 1 10); do
-        if hipe_starter_eval "$cookie" "1." > /dev/null 2>&1 ; then
-            break
-        fi
-    done
-
-    # Don't check whether we've found running node in the loop above - following eval call will catch
-    # this error naturally.
-    hipe_starter_eval "$cookie" "
-application:load(rabbit),
-application:load(rabbit_common),
-{ok, Mods} = application:get_env(rabbit, hipe_modules),
-ModsToDump = [ Mod || Mod <- Mods, Mod:module_info(native) =:= false ],
-[ begin
-    {M, BeamCode, _} = code:get_object_code(M),
-    BeamName = \"${HIPE_DIRECTORY}/\" ++ atom_to_list(M) ++ \".beam\",
-    {ok, {Architecture, NativeCode}} = hipe:compile(M, [], BeamCode, [o3]),
-    {ok, _, Chunks0} = beam_lib:all_chunks(BeamCode),
-    ChunkName = hipe_unified_loader:chunk_name(Architecture),
-    Chunks1 = lists:keydelete(ChunkName, 1, Chunks0),
-    Chunks = Chunks1 ++ [{ChunkName,NativeCode}],
-    {ok, BeamPlusNative} = beam_lib:build_module(Chunks),
-    file:write_file(BeamName, BeamPlusNative),
-    M
-  end
-  || M <- ModsToDump ].
-"
-
-    pkill -f rabbit-hipe-compiler || true
-}
-
 case "$1" in
     configure)
         if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
            [ ! -f /etc/rabbitmq/rabbitmq-env.conf ]; then
             mv /etc/rabbitmq/rabbitmq.conf /etc/rabbitmq/rabbitmq-env.conf
         fi
-        hipe_compile
+        if command -v systemctl > /dev/null; then
+            systemctl start epmd.socket
+        fi
+        rabbitmqctl hipe_compile $HIPE_DIRECTORY
     ;;
 
     abort-upgrade|abort-remove|abort-deconfigure)
index 2d6d389e0e4da7b7fe6a3bec48229ae8846e8a3f..90bac921ca1e7d85483b6d4ec8987528591aaf21 100644 (file)
@@ -1,15 +1,18 @@
 [Unit]
-Description=RabbitMQ Messaging Server
-After=network.target
+Description=RabbitMQ broker
+After=network.target epmd.socket
+Requires=network.target epmd.socket
 
 [Service]
+LimitNOFILE=105472
 Type=notify
-NotifyAccess=all
 User=rabbitmq
-SyslogIdentifier=rabbitmq
-LimitNOFILE=65536
-ExecStart=/usr/sbin/rabbitmq-server
-ExecStop=/usr/sbin/rabbitmqctl stop
+Group=rabbitmq
+NotifyAccess=all
+TimeoutStartSec=3600
+WorkingDirectory=/var/lib/rabbitmq
+ExecStart=/usr/lib/rabbitmq/bin/rabbitmq-server
+ExecStop=/usr/lib/rabbitmq/bin/rabbitmqctl stop
 
 [Install]
 WantedBy=multi-user.target
index 9ec4287eaac87326a609f4beeea66e193c8658c4..b5e42a9fa9628557cd72ec5ab8c7840f3fc3be5a 100755 (executable)
@@ -56,7 +56,7 @@ override_dh_auto_install:
                $(DEB_DESTDIR)$(PREFIX)/lib/ocf/resource.d/rabbitmq/rabbitmq-server
        install -p -D -m 0755 scripts/rabbitmq-server-ha.ocf \
                $(DEB_DESTDIR)$(PREFIX)/lib/ocf/resource.d/rabbitmq/rabbitmq-server-ha
-       install -p -D -m 0644 scripts/set_rabbitmq_policy.sh \
+       install -p -D -m 0644 docs/set_rabbitmq_policy.sh.example \
                $(DEB_DESTDIR)$(PREFIX)/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh.example
 
        rm $(DEB_DESTDIR)$(RMQ_ERLAPP_DIR)/LICENSE* \
diff --git a/rabbitmq-server/CODE_OF_CONDUCT.md b/rabbitmq-server/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index 833334dcd6c0ec625b3c89e61431e80b48878499..e211234bf6af9b8831826a749e9d5297ab7ff0df 100644 (file)
@@ -21,7 +21,8 @@ EXTRA_SOURCES += $(USAGES_ERL)
 .DEFAULT_GOAL = all
 $(PROJECT).d:: $(EXTRA_SOURCES)
 
-DEP_PLUGINS = rabbit_common/mk/rabbitmq-run.mk \
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \
+             rabbit_common/mk/rabbitmq-run.mk \
              rabbit_common/mk/rabbitmq-dist.mk \
              rabbit_common/mk/rabbitmq-tools.mk
 
@@ -41,6 +42,7 @@ DISTRIBUTED_DEPS := rabbitmq_amqp1_0 \
                    rabbitmq_event_exchange \
                    rabbitmq_federation \
                    rabbitmq_federation_management \
+                   rabbitmq_jms_topic_exchange \
                    rabbitmq_management \
                    rabbitmq_management_agent \
                    rabbitmq_management_visualiser \
@@ -50,7 +52,9 @@ DISTRIBUTED_DEPS := rabbitmq_amqp1_0 \
                    rabbitmq_shovel \
                    rabbitmq_shovel_management \
                    rabbitmq_stomp \
+                   rabbitmq_top \
                    rabbitmq_tracing \
+                   rabbitmq_trust_store \
                    rabbitmq_web_dispatch \
                    rabbitmq_web_stomp \
                    rabbitmq_web_stomp_examples
@@ -64,6 +68,9 @@ DEPS += $(DISTRIBUTED_DEPS)
 endif
 endif
 
+# FIXME: Remove rabbitmq_test as TEST_DEPS from here for now.
+TEST_DEPS := amqp_client meck $(filter-out rabbitmq_test,$(TEST_DEPS))
+
 include erlang.mk
 
 # --------------------------------------------------------------------
@@ -82,12 +89,6 @@ ifdef CREDIT_FLOW_TRACING
 RMQ_ERLC_OPTS += -DCREDIT_FLOW_TRACING=true
 endif
 
-ERTS_VER := $(shell erl -version 2>&1 | sed -E 's/.* version //')
-USE_SPECS_MIN_ERTS_VER = 5.11
-ifeq ($(call compare_version,$(ERTS_VER),$(USE_SPECS_MIN_ERTS_VER),>=),true)
-RMQ_ERLC_OPTS += -Duse_specs
-endif
-
 ifndef USE_PROPER_QC
 # PropEr needs to be installed for property checking
 # http://proper.softlab.ntua.gr/
@@ -95,33 +96,11 @@ USE_PROPER_QC := $(shell $(ERL) -eval 'io:format({module, proper} =:= code:ensur
 RMQ_ERLC_OPTS += $(if $(filter true,$(USE_PROPER_QC)),-Duse_proper_qc)
 endif
 
-ERLC_OPTS += $(RMQ_ERLC_OPTS)
-
 clean:: clean-extra-sources
 
 clean-extra-sources:
        $(gen_verbose) rm -f $(EXTRA_SOURCES)
 
-# --------------------------------------------------------------------
-# Tests.
-# --------------------------------------------------------------------
-
-TARGETS_IN_RABBITMQ_TEST = $(patsubst %,%-in-rabbitmq_test,\
-                          tests full unit lite conformance16 lazy-vq-tests)
-
-.PHONY: $(TARGETS_IN_RABBITMQ_TEST)
-
-TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS)
-
-tests:: tests-in-rabbitmq_test
-
-$(TARGETS_IN_RABBITMQ_TEST): $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
-    test-build $(DEPS_DIR)/rabbitmq_test
-       $(MAKE) -C $(DEPS_DIR)/rabbitmq_test \
-               IS_DEP=1 \
-               RABBITMQ_BROKER_DIR=$(RABBITMQ_BROKER_DIR) \
-               $(patsubst %-in-rabbitmq_test,%,$@)
-
 # --------------------------------------------------------------------
 # Documentation.
 # --------------------------------------------------------------------
@@ -221,6 +200,7 @@ RSYNC_FLAGS += -a $(RSYNC_V)                \
               --exclude 'plugins/'                     \
               --exclude '$(notdir $(DIST_DIR))/'       \
               --exclude '/$(notdir $(PACKAGES_DIR))/'  \
+              --exclude '/PACKAGES/'                   \
               --exclude '/cowboy/doc/'                 \
               --exclude '/cowboy/examples/'            \
               --exclude '/rabbitmq_amqp1_0/test/swiftmq/build/'\
index d64ab34a16cd7546ca80aa8fd457953be114554f..ae6f9b2d014294ccaeda35f528c49a970a4a2b6e 100644 (file)
@@ -44,4 +44,4 @@ See [building RabbitMQ server from source](http://www.rabbitmq.com/build-server.
 
 ## Copyright
 
-(c) Pivotal Software Inc., 2007-2015.
+(c) Pivotal Software Inc., 2007-2016.
diff --git a/rabbitmq-server/build.config b/rabbitmq-server/build.config
deleted file mode 100644 (file)
index b143068..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Do *not* comment or remove core modules
-# unless you know what you are doing.
-#
-# Feel free to comment plugins out however.
-
-# Core modules.
-core/core
-index/*
-core/index
-core/deps
-
-# Plugins that must run before Erlang code gets compiled.
-plugins/erlydtl
-plugins/protobuffs
-
-# Core modules, continued.
-core/erlc
-core/docs
-core/rel
-core/test
-core/compat
-
-# Plugins.
-plugins/asciidoc
-plugins/bootstrap
-plugins/c_src
-plugins/ci
-plugins/ct
-plugins/dialyzer
-# plugins/edoc
-plugins/elvis
-plugins/escript
-plugins/eunit
-plugins/relx
-plugins/shell
-plugins/triq
-plugins/xref
-
-# Plugins enhancing the functionality of other plugins.
-plugins/cover
-
-# Core modules which can use variables from plugins.
-core/deps-tools
diff --git a/rabbitmq-server/deps/amqp_client/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/amqp_client/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index f18807e5ce7b544bc2eea2e729f57062c5c32d5a..daa44befe0762f7284eb8ba86b5141b410430d86 100644 (file)
@@ -7,9 +7,12 @@ endif
 # Release artifacts are put in $(PACKAGES_DIR).
 PACKAGES_DIR ?= $(abspath PACKAGES)
 
-TEST_DEPS = rabbit
+TEST_DEPS += rabbit
 
-DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \
+             rabbit_common/mk/rabbitmq-dist.mk \
+             rabbit_common/mk/rabbitmq-run.mk \
+             rabbit_common/mk/rabbitmq-tools.mk
 
 # FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
 # reviewed and merged.
@@ -20,14 +23,6 @@ ERLANG_MK_COMMIT = rabbitmq-tmp
 include rabbitmq-components.mk
 include erlang.mk
 
-# --------------------------------------------------------------------
-# Tests.
-# --------------------------------------------------------------------
-
-include test.mk
-
-tests:: all_tests
-
 # --------------------------------------------------------------------
 # Distribution.
 # --------------------------------------------------------------------
@@ -36,6 +31,7 @@ tests:: all_tests
 
 distribution: docs source-dist package
 
+docs:: edoc
 edoc: doc/overview.edoc
 
 doc/overview.edoc: src/overview.edoc.in
diff --git a/rabbitmq-server/deps/amqp_client/build.config b/rabbitmq-server/deps/amqp_client/build.config
deleted file mode 100644 (file)
index 8f32456..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Do *not* comment or remove core modules
-# unless you know what you are doing.
-#
-# Feel free to comment plugins out however.
-
-# Core modules.
-core/core
-index/*
-core/index
-core/deps
-
-# Plugins that must run before Erlang code gets compiled.
-plugins/erlydtl
-plugins/protobuffs
-
-# Core modules, continued.
-core/erlc
-core/docs
-core/rel
-core/test
-core/compat
-
-# Plugins.
-plugins/asciidoc
-plugins/bootstrap
-plugins/c_src
-plugins/ci
-# plugins/ct
-plugins/dialyzer
-plugins/edoc
-plugins/elvis
-plugins/escript
-# plugins/eunit
-plugins/relx
-plugins/shell
-plugins/triq
-plugins/xref
-
-# Plugins enhancing the functionality of other plugins.
-plugins/cover
-
-# Core modules which can use variables from plugins.
-core/deps-tools
diff --git a/rabbitmq-server/deps/amqp_client/ci/test.sh b/rabbitmq-server/deps/amqp_client/ci/test.sh
new file mode 100755 (executable)
index 0000000..a88f91c
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set -ex
+
+SCRIPT=$0
+SCRIPT_DIR=$(cd $(dirname "$SCRIPT") && pwd)
+SRC_DIR=$(cd "$SCRIPT_DIR/.." && pwd)
+DEPS_DIR=$(cd "$SRC_DIR/.." && pwd)
+
+case $(uname -s) in
+FreeBSD) MAKE=gmake ;;
+*)       MAKE=make ;;
+esac
+
+(
+  cd "$SRC_DIR"
+  $MAKE dep_ranch="cp /ranch" DEPS_DIR="$DEPS_DIR" tests
+)
diff --git a/rabbitmq-server/deps/amqp_client/ci/test.yml b/rabbitmq-server/deps/amqp_client/ci/test.yml
new file mode 100644 (file)
index 0000000..1449b6a
--- /dev/null
@@ -0,0 +1,12 @@
+---
+platform: linux
+inputs:
+- name: amqp_client
+- name: rabbit_common
+- name: rabbit
+image_resource:
+  type: docker-image
+  source:
+    repository: pivotalrabbitmq/ci
+run:
+  path: amqp_client/ci/test.sh
index de8be5baae68b5bd0a31c671fc553622695e7721..efbcf5cd11a59ef1425ead2dfa4b0514e62b437b 100644 (file)
@@ -16,7 +16,7 @@
 
 ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
 
-ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+ERLANG_MK_VERSION = 2.0.0-pre.2-76-g427cfb8
 
 # Core configuration.
 
@@ -84,7 +84,7 @@ all:: deps app rel
 rel::
        $(verbose) :
 
-check:: clean app tests
+check:: tests
 
 clean:: clean-crashdump
 
@@ -421,6 +421,14 @@ pkg_boss_db_fetch = git
 pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
 pkg_boss_db_commit = master
 
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
 PACKAGES += bson
 pkg_bson_name = bson
 pkg_bson_description = BSON documents in Erlang, see bsonspec.org
@@ -885,14 +893,6 @@ pkg_dh_date_fetch = git
 pkg_dh_date_repo = https://github.com/daleharvey/dh_date
 pkg_dh_date_commit = master
 
-PACKAGES += dhtcrawler
-pkg_dhtcrawler_name = dhtcrawler
-pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
-pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_fetch = git
-pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_commit = master
-
 PACKAGES += dirbusterl
 pkg_dirbusterl_name = dirbusterl
 pkg_dirbusterl_description = DirBuster successor in Erlang
@@ -1139,7 +1139,7 @@ pkg_elvis_description = Erlang Style Reviewer
 pkg_elvis_homepage = https://github.com/inaka/elvis
 pkg_elvis_fetch = git
 pkg_elvis_repo = https://github.com/inaka/elvis
-pkg_elvis_commit = 0.2.4
+pkg_elvis_commit = master
 
 PACKAGES += emagick
 pkg_emagick_name = emagick
@@ -1781,6 +1781,14 @@ pkg_geef_fetch = git
 pkg_geef_repo = https://github.com/carlosmn/geef
 pkg_geef_commit = master
 
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
 PACKAGES += gen_cycle
 pkg_gen_cycle_name = gen_cycle
 pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
@@ -1981,6 +1989,14 @@ pkg_hyper_fetch = git
 pkg_hyper_repo = https://github.com/GameAnalytics/hyper
 pkg_hyper_commit = master
 
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
 PACKAGES += ibrowse
 pkg_ibrowse_name = ibrowse
 pkg_ibrowse_description = Erlang HTTP client
@@ -2501,6 +2517,14 @@ pkg_merl_fetch = git
 pkg_merl_repo = https://github.com/richcarl/merl
 pkg_merl_commit = master
 
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
 PACKAGES += mimetypes
 pkg_mimetypes_name = mimetypes
 pkg_mimetypes_description = Erlang MIME types library
@@ -2733,14 +2757,6 @@ pkg_oauth2_fetch = git
 pkg_oauth2_repo = https://github.com/kivra/oauth2
 pkg_oauth2_commit = master
 
-PACKAGES += oauth2c
-pkg_oauth2c_name = oauth2c
-pkg_oauth2c_description = Erlang OAuth2 Client
-pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
-pkg_oauth2c_fetch = git
-pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
-pkg_oauth2c_commit = master
-
 PACKAGES += octopus
 pkg_octopus_name = octopus
 pkg_octopus_description = Small and flexible pool manager written in Erlang
@@ -3533,6 +3549,14 @@ pkg_stripe_fetch = git
 pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
 pkg_stripe_commit = v1
 
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
 PACKAGES += surrogate
 pkg_surrogate_name = surrogate
 pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
@@ -3907,7 +3931,7 @@ pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
 pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
 pkg_xref_runner_fetch = git
 pkg_xref_runner_repo = https://github.com/inaka/xref_runner
-pkg_xref_runner_commit = 0.2.0
+pkg_xref_runner_commit = 0.2.3
 
 PACKAGES += yamerl
 pkg_yamerl_name = yamerl
@@ -4092,7 +4116,10 @@ endif
 # While Makefile file could be GNUmakefile or makefile,
 # in practice only Makefile is needed so far.
 define dep_autopatch
-       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+               $(call dep_autopatch_erlang_mk,$(1)); \
+       elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
                if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
                        $(call dep_autopatch2,$(1)); \
                elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
@@ -4100,12 +4127,7 @@ define dep_autopatch
                elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
                        $(call dep_autopatch2,$(1)); \
                else \
-                       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
-                               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-                               $(call dep_autopatch_erlang_mk,$(1)); \
-                       else \
-                               $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
-                       fi \
+                       $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
                fi \
        else \
                if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
@@ -4117,8 +4139,11 @@ define dep_autopatch
 endef
 
 define dep_autopatch2
+       if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+       fi; \
        $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-       if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
                $(call dep_autopatch_fetch_rebar); \
                $(call dep_autopatch_rebar,$(1)); \
        else \
@@ -4256,57 +4281,6 @@ define dep_autopatch_rebar.erl
                                Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
                end
        end(),
-       FindFirst = fun(F, Fd) ->
-               case io:parse_erl_form(Fd, undefined) of
-                       {ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
-                               [PT, F(F, Fd)];
-                       {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
-                               case proplists:get_value(parse_transform, CompileOpts) of
-                                       undefined -> [F(F, Fd)];
-                                       PT -> [PT, F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, include, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ ->
-                                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
-                                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                                       _ -> [F(F, Fd)]
-                                               end
-                               end;
-                       {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
-                               {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
-                               [F(F, HrlFd), F(F, Fd)];
-                       {ok, {attribute, _, include_lib, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, import, {Imp, _}}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
-                                       {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {eof, _} ->
-                               file:close(Fd),
-                               [];
-                       _ ->
-                               F(F, Fd)
-               end
-       end,
-       fun() ->
-               ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
-               First0 = lists:usort(lists:flatten([begin
-                       {ok, Fd} = file:open(F, [read]),
-                       FindFirst(FindFirst, Fd)
-               end || F <- ErlFiles])),
-               First = lists:flatten([begin
-                       {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
-                       FindFirst(FindFirst, Fd)
-               end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
-               Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
-                       lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
-       end(),
        Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
        Write("\npreprocess::\n"),
        Write("\npre-deps::\n"),
@@ -4419,9 +4393,10 @@ define dep_autopatch_rebar.erl
                                        Output, ": $$\(foreach ext,.c .C .cc .cpp,",
                                                "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
                                        "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
-                                       case filename:extension(Output) of
-                                               [] -> "\n";
-                                               _ -> " -shared\n"
+                                       case {filename:extension(Output), $(PLATFORM)} of
+                                           {[], _} -> "\n";
+                                           {_, darwin} -> "\n";
+                                           _ -> " -shared\n"
                                        end])
                        end,
                        [PortSpec(S) || S <- PortSpecs]
@@ -4490,6 +4465,15 @@ define dep_autopatch_app.erl
        halt()
 endef
 
+define dep_autopatch_appsrc_script.erl
+       AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+       AppSrcScript = AppSrc ++ ".script",
+       Bindings = erl_eval:new_bindings(),
+       {ok, Conf} = file:script(AppSrcScript, Bindings),
+       ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+       halt()
+endef
+
 define dep_autopatch_appsrc.erl
        AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
        AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
@@ -4576,10 +4560,11 @@ $(DEPS_DIR)/$(call dep_name,$1):
                exit 17; \
        fi
        $(verbose) mkdir -p $(DEPS_DIR)
-       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
-       $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
-               echo " AUTO  " $(DEP_STR); \
-               cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+       $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+                       && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+               echo " AUTO  " $(1); \
+               cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
        fi
        - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
                echo " CONF  " $(DEP_STR); \
@@ -4672,28 +4657,10 @@ dtl_verbose = $(dtl_verbose_$(V))
 
 # Core targets.
 
-define erlydtl_compile.erl
-       [begin
-               Module0 = case "$(strip $(DTL_FULL_PATH))" of
-                       "" ->
-                               filename:basename(F, ".dtl");
-                       _ ->
-                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
-                               re:replace(F2, "/",  "_",  [{return, list}, global])
-               end,
-               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
-               case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
-                       ok -> ok;
-                       {ok, _} -> ok
-               end
-       end || F <- string:tokens("$(1)", " ")],
-       halt().
-endef
-
-ifneq ($(wildcard src/),)
-
 DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
 
+ifneq ($(DTL_FILES),)
+
 ifdef DTL_FULL_PATH
 BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
 else
@@ -4701,7 +4668,7 @@ BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES
 endif
 
 ifneq ($(words $(DTL_FILES)),0)
-# Rebuild everything when the Makefile changes.
+# Rebuild templates when the Makefile changes.
 $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
        @mkdir -p $(ERLANG_MK_TMP)
        @if test -f $@; then \
@@ -4712,9 +4679,28 @@ $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
 ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
 endif
 
-ebin/$(PROJECT).app:: $(DTL_FILES)
+define erlydtl_compile.erl
+       [begin
+               Module0 = case "$(strip $(DTL_FULL_PATH))" of
+                       "" ->
+                               filename:basename(F, ".dtl");
+                       _ ->
+                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+                               re:replace(F2, "/",  "_",  [{return, list}, global])
+               end,
+               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+               case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+                       ok -> ok;
+                       {ok, _} -> ok
+               end
+       end || F <- string:tokens("$(1)", " ")],
+       halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
        $(if $(strip $?),\
-               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?),-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/))
+
 endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
@@ -4888,51 +4874,79 @@ $(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
 # Erlang and Core Erlang files.
 
 define makedep.erl
+       E = ets:new(makedep, [bag]),
+       G = digraph:new([acyclic]),
        ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
-       Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
-       Add = fun (Dep, Acc) ->
-               case lists:keyfind(atom_to_list(Dep), 1, Modules) of
-                       {_, DepFile} -> [DepFile|Acc];
-                       false -> Acc
+       Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+       Add = fun (Mod, Dep) ->
+               case lists:keyfind(Dep, 1, Modules) of
+                       false -> ok;
+                       {_, DepFile} ->
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile}),
+                               digraph:add_vertex(G, Mod),
+                               digraph:add_vertex(G, Dep),
+                               digraph:add_edge(G, Mod, Dep)
                end
        end,
-       AddHd = fun (Dep, Acc) ->
-               case {Dep, lists:keymember(Dep, 2, Modules)} of
-                       {"src/" ++ _, false} -> [Dep|Acc];
-                       {"include/" ++ _, false} -> [Dep|Acc];
-                       _ -> Acc
+       AddHd = fun (F, Mod, DepFile) ->
+               case file:open(DepFile, [read]) of
+                       {error, enoent} -> ok;
+                       {ok, Fd} ->
+                               F(F, Fd, Mod),
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile})
                end
        end,
-       CompileFirst = fun (Deps) ->
-               First0 = [case filename:extension(D) of
-                       ".erl" -> filename:basename(D, ".erl");
-                       _ -> []
-               end || D <- Deps],
-               case lists:usort(First0) of
-                       [] -> [];
-                       [[]] -> [];
-                       First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
-               end
+       Attr = fun
+               (F, Mod, behavior, Dep) -> Add(Mod, Dep);
+               (F, Mod, behaviour, Dep) -> Add(Mod, Dep);
+               (F, Mod, compile, {parse_transform, Dep}) -> Add(Mod, Dep);
+               (F, Mod, compile, Opts) when is_list(Opts) ->
+                       case proplists:get_value(parse_transform, Opts) of
+                               undefined -> ok;
+                               Dep -> Add(Mod, Dep)
+                       end;
+               (F, Mod, include, Hrl) ->
+                       case filelib:is_file("include/" ++ Hrl) of
+                               true -> AddHd(F, Mod, "include/" ++ Hrl);
+                               false ->
+                                       case filelib:is_file("src/" ++ Hrl) of
+                                               true -> AddHd(F, Mod, "src/" ++ Hrl);
+                                               false -> false
+                                       end
+                       end;
+               (F, Mod, include_lib, "$1/include/" ++ Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, include_lib, Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, import, {Imp, _}) ->
+                       case filelib:is_file("src/" ++ atom_to_list(Imp) ++ ".erl") of
+                               false -> ok;
+                               true -> Add(Mod, Imp)
+                       end;
+               (_, _, _, _) -> ok
        end,
-       Depend = [begin
-               case epp:parse_file(F, ["include/"], []) of
-                       {ok, Forms} ->
-                               Deps = lists:usort(lists:foldl(fun
-                                       ({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
-                                       (_, Acc) -> Acc
-                               end, [], Forms)),
-                               case Deps of
-                                       [] -> "";
-                                       _ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
-                               end;
-                       {error, enoent} ->
-                               []
+       MakeDepend = fun(F, Fd, Mod) ->
+               case io:parse_erl_form(Fd, undefined) of
+                       {ok, {attribute, _, Key, Value}, _} ->
+                               Attr(F, Mod, Key, Value),
+                               F(F, Fd, Mod);
+                       {eof, _} ->
+                               file:close(Fd);
+                       _ ->
+                               F(F, Fd, Mod)
                end
+       end,
+       [begin
+               Mod = list_to_atom(filename:basename(F, ".erl")),
+               {ok, Fd} = file:open(F, [read]),
+               MakeDepend(MakeDepend, Fd, Mod)
        end || F <- ErlFiles],
-       ok = file:write_file("$(1)", Depend),
+       Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+       CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+       ok = file:write_file("$(1)", [
+               [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+               "\nCOMPILE_FIRST +=", [[" ", atom_to_list(CF)] || CF <- CompileFirst], "\n"
+       ]),
        halt()
 endef
 
@@ -5069,6 +5083,11 @@ test-dir:
                $(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
 endif
 
+ifeq ($(wildcard src),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps
+       $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+else
 ifeq ($(wildcard ebin/test),)
 test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
 test-build:: clean deps test-deps $(PROJECT).d
@@ -5086,6 +5105,7 @@ clean-test-dir:
 ifneq ($(wildcard $(TEST_DIR)/*.beam),)
        $(gen_verbose) rm -f $(TEST_DIR)/*.beam
 endif
+endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5103,11 +5123,14 @@ $(if $(filter-out -Werror,$1),\
                $(shell echo $1 | cut -b 2-)))
 endef
 
+define compat_erlc_opts_to_list
+       [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
 define compat_rebar_config
 {deps, [$(call comma_list,$(foreach d,$(DEPS),\
        {$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
-{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
-       $(call compat_convert_erlc_opts,$o)))]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
 endef
 
 $(eval _compat_rebar_config = $$(compat_rebar_config))
@@ -5126,12 +5149,12 @@ MAN_SECTIONS ?= 3 7
 
 docs:: asciidoc
 
-asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+asciidoc: asciidoc-guide asciidoc-manual
 
 ifeq ($(wildcard doc/src/guide/book.asciidoc),)
 asciidoc-guide:
 else
-asciidoc-guide:
+asciidoc-guide: distclean-asciidoc doc-deps
        a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
        a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
 endif
@@ -5139,7 +5162,7 @@ endif
 ifeq ($(wildcard doc/src/manual/*.asciidoc),)
 asciidoc-manual:
 else
-asciidoc-manual:
+asciidoc-manual: distclean-asciidoc doc-deps
        for f in doc/src/manual/*.asciidoc ; do \
                a2x -v -f manpage $$f ; \
        done
@@ -5154,7 +5177,7 @@ install-docs:: install-asciidoc
 install-asciidoc: asciidoc-manual
        for s in $(MAN_SECTIONS); do \
                mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
-               install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+               install -g `id -u` -o `id -g` -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
        done
 endif
 
@@ -5214,6 +5237,8 @@ define bs_appsrc_lib
 ]}.
 endef
 
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
 ifdef SP
 define bs_Makefile
 PROJECT = $p
@@ -5223,17 +5248,21 @@ PROJECT_VERSION = 0.0.1
 # Whitespace to be used when creating files from templates.
 SP = $(SP)
 
-include erlang.mk
 endef
 else
 define bs_Makefile
 PROJECT = $p
-include erlang.mk
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 endef
 endif
 
 define bs_apps_Makefile
 PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
 endef
 
@@ -5527,6 +5556,7 @@ endif
        $(eval p := $(PROJECT))
        $(eval n := $(PROJECT)_sup)
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc,src/$(PROJECT).app.src)
@@ -5540,6 +5570,7 @@ ifneq ($(wildcard src/),)
 endif
        $(eval p := $(PROJECT))
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
@@ -5620,12 +5651,32 @@ list-templates:
 
 C_SRC_DIR ?= $(CURDIR)/c_src
 C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
-C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
 C_SRC_TYPE ?= shared
 
 # System type and C compiler/flags.
 
-ifeq ($(PLATFORM),darwin)
+ifeq ($(PLATFORM),msys2)
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+       CC = /mingw64/bin/gcc
+       CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+       CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
        CC ?= cc
        CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
        CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
@@ -5640,10 +5691,15 @@ else ifeq ($(PLATFORM),linux)
        CXXFLAGS ?= -O3 -finline-functions -Wall
 endif
 
-CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
-CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+ifneq ($(PLATFORM),msys2)
+       CFLAGS += -fPIC
+       CXXFLAGS += -fPIC
+endif
 
-LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lerl_interface -lei
 
 # Verbosity.
 
@@ -5680,15 +5736,15 @@ OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
 COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
 COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
 
-app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-$(C_SRC_OUTPUT): $(OBJECTS)
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
        $(verbose) mkdir -p priv/
        $(link_verbose) $(CC) $(OBJECTS) \
                $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
-               -o $(C_SRC_OUTPUT)
+               -o $(C_SRC_OUTPUT_FILE)
 
 %.o: %.c
        $(COMPILE_C) $(OUTPUT_OPTION) $<
@@ -5705,13 +5761,13 @@ $(C_SRC_OUTPUT): $(OBJECTS)
 clean:: clean-c_src
 
 clean-c_src:
-       $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+       $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
 
 endif
 
 ifneq ($(wildcard $(C_SRC_DIR)),)
 $(C_SRC_ENV):
-       $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+       $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
                io_lib:format( \
                        \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
                        \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
@@ -5889,6 +5945,78 @@ endif
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+ifneq ($(wildcard $(TEST_DIR)),)
+       CT_SUITES ?= $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+else
+       CT_SUITES ?=
+endif
+
+# Core targets.
+
+tests:: ct
+
+distclean:: distclean-ct
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Common_test targets:" \
+               "  ct          Run all the common_test suites for this project" \
+               "" \
+               "All your common_test suites have their associated targets." \
+               "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+       -no_auto_compile \
+       -noinput \
+       -pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(TEST_DIR) \
+       -dir $(TEST_DIR) \
+       -logdir $(CURDIR)/logs
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP),,apps-ct)
+else
+ct: test-build $(if $(IS_APP),,apps-ct)
+       $(verbose) mkdir -p $(CURDIR)/logs/
+       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-ct:
+       $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app ct IS_APP=1; done
+endif
+
+ifndef t
+CT_EXTRA =
+else
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+       $(verbose) mkdir -p $(CURDIR)/logs/
+       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+       $(gen_verbose) rm -rf $(CURDIR)/logs/
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
 .PHONY: plt distclean-plt dialyze
 
 # Configuration.
@@ -5897,9 +6025,8 @@ DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
 export DIALYZER_PLT
 
 PLT_APPS ?=
-DIALYZER_DIRS ?= --src -r src
-DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
-       -Wunmatched_returns # -Wunderspecs
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
 
 # Core targets.
 
@@ -5915,6 +6042,18 @@ help::
 
 # Plugin-specific targets.
 
+define filter_opts.erl
+       Opts = binary:split(<<"$1">>, <<"-">>, [global]),
+       Filtered = lists:reverse(lists:foldl(fun
+               (O = <<"pa ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"D ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"I ", _/bits>>, Acc) -> [O|Acc];
+               (_, Acc) -> Acc
+       end, [], Opts)),
+       io:format("~s~n", [[["-", O] || O <- Filtered]]),
+       halt().
+endef
+
 $(DIALYZER_PLT): deps app
        $(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
 
@@ -5928,7 +6067,7 @@ dialyze:
 else
 dialyze: $(DIALYZER_PLT)
 endif
-       $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+       $(verbose) dialyzer --no_native `$(call erlang,$(call filter_opts.erl,$(ERLC_OPTS)))` $(DIALYZER_DIRS) $(DIALYZER_OPTS)
 
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5941,58 +6080,20 @@ EDOC_OPTS ?=
 
 # Core targets.
 
-docs:: distclean-edoc edoc
+ifneq ($(wildcard doc/overview.edoc),)
+docs:: edoc
+endif
 
 distclean:: distclean-edoc
 
 # Plugin-specific targets.
 
-edoc: doc-deps
+edoc: distclean-edoc doc-deps
        $(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().'
 
 distclean-edoc:
        $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info
 
-# Copyright (c) 2015, Erlang Solutions Ltd.
-# This file is part of erlang.mk and subject to the terms of the ISC License.
-
-.PHONY: elvis distclean-elvis
-
-# Configuration.
-
-ELVIS_CONFIG ?= $(CURDIR)/elvis.config
-
-ELVIS ?= $(CURDIR)/elvis
-export ELVIS
-
-ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
-ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
-ELVIS_OPTS ?=
-
-# Core targets.
-
-help::
-       $(verbose) printf "%s\n" "" \
-               "Elvis targets:" \
-               "  elvis       Run Elvis using the local elvis.config or download the default otherwise"
-
-distclean:: distclean-elvis
-
-# Plugin-specific targets.
-
-$(ELVIS):
-       $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
-       $(verbose) chmod +x $(ELVIS)
-
-$(ELVIS_CONFIG):
-       $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
-
-elvis: $(ELVIS) $(ELVIS_CONFIG)
-       $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
-
-distclean-elvis:
-       $(gen_verbose) rm -rf $(ELVIS)
-
 # Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
@@ -6058,6 +6159,74 @@ escript:: distclean-escript deps app
 distclean-escript:
        $(gen_verbose) rm -f $(ESCRIPT_NAME)
 
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "EUnit targets:" \
+               "  eunit       Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+       case "$(COVER)" of
+               "" -> ok;
+               _ ->
+                       case cover:compile_beam_directory("ebin") of
+                               {error, _} -> halt(1);
+                               _ -> ok
+                       end
+       end,
+       case eunit:test($1, [$(EUNIT_OPTS)]) of
+               ok -> ok;
+               error -> halt(2)
+       end,
+       case "$(COVER)" of
+               "" -> ok;
+               _ ->
+                       cover:export("eunit.coverdata")
+       end,
+       halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(call core_find,ebin/,*.beam)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.beam)))
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+       $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP),,apps-eunit)
+       $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit:
+       $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; done
+endif
+endif
+
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
index 2ccf681c0907b4760b906eaab5165f724d832483..c462a9c34721f9f42954c21d3e20ab1dffb270eb 100644 (file)
 
 -include("amqp_client.hrl").
 
--ifdef(use_specs).
--type(state() :: any()).
--type(consume() :: #'basic.consume'{}).
--type(consume_ok() :: #'basic.consume_ok'{}).
--type(cancel() :: #'basic.cancel'{}).
--type(cancel_ok() :: #'basic.cancel_ok'{}).
--type(deliver() :: #'basic.deliver'{}).
--type(from() :: any()).
--type(reason() :: any()).
--type(ok_error() :: {ok, state()} | {error, reason(), state()}).
+-type state() :: any().
+-type consume() :: #'basic.consume'{}.
+-type consume_ok() :: #'basic.consume_ok'{}.
+-type cancel() :: #'basic.cancel'{}.
+-type cancel_ok() :: #'basic.cancel_ok'{}.
+-type deliver() :: #'basic.deliver'{}.
+-type from() :: any().
+-type reason() :: any().
+-type ok_error() :: {ok, state()} | {error, reason(), state()}.
 
--spec(init/1 :: ([any()]) -> {ok, state()}).
--spec(handle_consume/3 :: (consume(), pid(), state()) -> ok_error()).
--spec(handle_consume_ok/3 :: (consume_ok(), consume(), state()) ->
-                                  ok_error()).
--spec(handle_cancel/2 :: (cancel(), state()) -> ok_error()).
--spec(handle_server_cancel/2 :: (cancel(), state()) -> ok_error()).
--spec(handle_cancel_ok/3 :: (cancel_ok(), cancel(), state()) -> ok_error()).
--spec(handle_deliver/3 :: (deliver(), #amqp_msg{}, state()) -> ok_error()).
--spec(handle_info/2 :: (any(), state()) -> ok_error()).
--spec(handle_call/3 :: (any(), from(), state()) ->
+-spec init([any()]) -> {ok, state()}.
+-spec handle_consume(consume(), pid(), state()) -> ok_error().
+-spec handle_consume_ok(consume_ok(), consume(), state()) ->
+                                  ok_error().
+-spec handle_cancel(cancel(), state()) -> ok_error().
+-spec handle_server_cancel(cancel(), state()) -> ok_error().
+-spec handle_cancel_ok(cancel_ok(), cancel(), state()) -> ok_error().
+-spec handle_deliver(deliver(), #amqp_msg{}, state()) -> ok_error().
+-spec handle_info(any(), state()) -> ok_error().
+-spec handle_call(any(), from(), state()) ->
                            {reply, any(), state()} | {noreply, state()} |
-                            {error, reason(), state()}).
--spec(terminate/2 :: (any(), state()) -> state()).
--endif.
+                            {error, reason(), state()}.
+-spec terminate(any(), state()) -> state().
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 230d0404fc4afb4fc807dd8b91282e6bf8ed7066..547b9fda21629ed5061eb9049e707a3ad1a621be 100644 (file)
@@ -37,7 +37,7 @@ start_link(Type, Connection, ConnName, InfraArgs, ChNumber,
                     Sup, {channel,
                           {amqp_channel, start_link,
                            [Type, Connection, ChNumber, ConsumerPid, Identity]},
-                          intrinsic, ?MAX_WAIT, worker, [amqp_channel]}),
+                          intrinsic, ?WORKER_WAIT, worker, [amqp_channel]}),
     Writer = start_writer(Sup, Type, InfraArgs, ConnName, ChNumber, ChPid),
     amqp_channel:set_writer(ChPid, Writer),
     {ok, AState} = init_command_assembler(Type),
@@ -60,7 +60,7 @@ start_writer(Sup, network, [Sock, FrameMax], ConnName, ChNumber, ChPid) ->
                      {writer, {rabbit_writer, start_link,
                                [Sock, ChNumber, FrameMax, ?PROTOCOL, ChPid,
                                 {ConnName, ChNumber}]},
-                      intrinsic, ?MAX_WAIT, worker, [rabbit_writer]}),
+                      transient, ?WORKER_WAIT, worker, [rabbit_writer]}),
     Writer.
 
 init_command_assembler(direct)  -> {ok, none};
@@ -74,4 +74,4 @@ init([{ConsumerModule, ConsumerArgs}, Identity]) ->
     {ok, {{one_for_all, 0, 1},
           [{gen_consumer, {amqp_gen_consumer, start_link,
                            [ConsumerModule, ConsumerArgs, Identity]},
-           intrinsic, ?MAX_WAIT, worker, [amqp_gen_consumer]}]}}.
+           intrinsic, ?WORKER_WAIT, worker, [amqp_gen_consumer]}]}}.
index 56c1ee079c95bdc018134b9c5041ea94a359699c..8d1450107fa416eaa644b96bf06ff8d4c39b55c8 100644 (file)
@@ -1,6 +1,6 @@
 {application, amqp_client,
  [{description, "RabbitMQ AMQP Client"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, [amqp_sup]},
   {env, [{prefer_ipv6, false},
index 148d302e705360d2ff1a28f9f7575dbfd30eb03d..0dc0b7076c883af97a883daef60a9069c56d9129 100644 (file)
 -include("amqp_client_internal.hrl").
 
 -export([open_channel/1, open_channel/2, open_channel/3, register_blocked_handler/2]).
--export([start/1, close/1, close/2, close/3]).
+-export([start/1, start/2, close/1, close/2, close/3, close/4]).
 -export([error_atom/1]).
 -export([info/2, info_keys/1, info_keys/0]).
+-export([connection_name/1]).
 -export([socket_adapter_info/2]).
 
 -define(DEFAULT_CONSUMER, {amqp_selective_consumer, []}).
 %% where
 %%      Params = amqp_params_network() | amqp_params_direct()
 %%      Connection = pid()
+%% @doc same as {@link amqp_connection:start/2. start(Params, undefined)}
+start(AmqpParams) ->
+    start(AmqpParams, undefined).
+
+%% @spec (Params, ConnectionName) -> {ok, Connection} | {error, Error}
+%% where
+%%      Params = amqp_params_network() | amqp_params_direct()
+%%      ConnectionName = undefined | binary()
+%%      Connection = pid()
 %% @doc Starts a connection to an AMQP server. Use network params to
 %% connect to a remote AMQP server or direct params for a direct
 %% connection to a RabbitMQ server, assuming that the server is
 %% running in the same process space.  If the port is set to 'undefined',
 %% the default ports will be selected depending on whether this is a
 %% normal or an SSL connection.
-start(AmqpParams) ->
+%% If ConnectionName is binary - it will be added to client_properties as 
+%% user specified connection name.
+start(AmqpParams, ConnName) when ConnName == undefined; is_binary(ConnName) ->
     ensure_started(),
     AmqpParams1 =
         case AmqpParams of
@@ -158,9 +170,24 @@ start(AmqpParams) ->
             _ ->
                 AmqpParams
         end,
-    {ok, _Sup, Connection} = amqp_sup:start_connection_sup(AmqpParams1),
+    AmqpParams2 = set_connection_name(ConnName, AmqpParams1),
+    {ok, _Sup, Connection} = amqp_sup:start_connection_sup(AmqpParams2),
     amqp_gen_connection:connect(Connection).
 
+set_connection_name(undefined, Params) -> Params;
+set_connection_name(ConnName, 
+                    #amqp_params_network{client_properties = Props} = Params) ->
+    Params#amqp_params_network{
+        client_properties = [
+            {<<"connection_name">>, longstr, ConnName} | Props
+        ]};
+set_connection_name(ConnName, 
+                    #amqp_params_direct{client_properties = Props} = Params) ->
+    Params#amqp_params_direct{
+        client_properties = [
+            {<<"connection_name">>, longstr, ConnName} | Props
+        ]}.
+
 %% Usually the amqp_client application will already be running. We
 %% check whether that is the case by invoking an undocumented function
 %% which does not require a synchronous call to the application
@@ -171,13 +198,13 @@ ensure_started() ->
     [ensure_started(App) || App <- [xmerl, rabbit_common, amqp_client]].
 
 ensure_started(App) ->
-    case application_controller:get_master(App) of
-        undefined -> case application:start(App) of
-                         ok                              -> ok;
-                         {error, {already_started, App}} -> ok;
-                         {error, _} = E                  -> throw(E)
-                     end;
-        _         -> ok
+    case is_pid(application_controller:get_master(App)) andalso amqp_sup:is_ready() of
+        true  -> ok;
+        false -> case application:start(App) of
+                     ok                              -> ok;
+                     {error, {already_started, App}} -> ok;
+                     {error, _} = E                  -> throw(E)
+                 end
     end.
 
 %%---------------------------------------------------------------------------
@@ -342,3 +369,18 @@ info_keys() ->
 %% based on the socket for the protocol given.
 socket_adapter_info(Sock, Protocol) ->
     amqp_direct_connection:socket_adapter_info(Sock, Protocol).
+
+%% @spec (ConnectionPid) -> ConnectionName
+%% where
+%%      ConnectionPid = pid()
+%%      ConnectionName = binary()
+%% @doc Returns the user-specified connection name from the client properties.
+connection_name(ConnectionPid) ->
+    ClientProperties = case info(ConnectionPid, [amqp_params]) of
+        [{_, #amqp_params_network{client_properties = Props}}] -> Props;
+        [{_, #amqp_params_direct{client_properties = Props}}] -> Props
+    end,
+    case lists:keyfind(<<"connection_name">>, 1, ClientProperties) of
+        {<<"connection_name">>, _, ConnName} -> ConnName;
+        false                                -> undefined
+    end.
index 9aeb3110159c39b9610dd62cd39e26dee34329da..636e81a4814bf3231131323acb15b268c98e02fc 100644 (file)
@@ -48,7 +48,7 @@ start_channels_manager(Sup, Conn, ConnName, Type) ->
                 Sup,
                 {channels_manager, {amqp_channels_manager, start_link,
                                     [Conn, ConnName, ChSupSup]},
-                 transient, ?MAX_WAIT, worker, [amqp_channels_manager]}).
+                 transient, ?WORKER_WAIT, worker, [amqp_channels_manager]}).
 
 start_infrastructure_fun(Sup, Conn, network) ->
     fun (Sock, ConnName) ->
@@ -60,13 +60,13 @@ start_infrastructure_fun(Sup, Conn, network) ->
                   {writer,
                    {rabbit_writer, start_link,
                     [Sock, 0, ?FRAME_MIN_SIZE, ?PROTOCOL, Conn, ConnName]},
-                   transient, ?MAX_WAIT, worker, [rabbit_writer]}),
+                   transient, ?WORKER_WAIT, worker, [rabbit_writer]}),
             {ok, _Reader} =
                 supervisor2:start_child(
                   Sup,
                   {main_reader, {amqp_main_reader, start_link,
                                  [Sock, Conn, ChMgr, AState, ConnName]},
-                   transient, ?MAX_WAIT, worker, [amqp_main_reader]}),
+                   transient, ?WORKER_WAIT, worker, [amqp_main_reader]}),
             {ok, ChMgr, Writer}
     end;
 start_infrastructure_fun(Sup, Conn, direct) ->
@@ -76,7 +76,7 @@ start_infrastructure_fun(Sup, Conn, direct) ->
                 supervisor2:start_child(
                   Sup,
                   {collector, {rabbit_queue_collector, start_link, [ConnName]},
-                   transient, ?MAX_WAIT, worker, [rabbit_queue_collector]}),
+                   transient, ?WORKER_WAIT, worker, [rabbit_queue_collector]}),
             {ok, ChMgr, Collector}
     end.
 
index 52d5fa7ac7dbca7e86a5771236a532bd7bc2d363..15491b82786ce436690ff55fb8edc3740add6449 100644 (file)
@@ -108,6 +108,7 @@ i(port,         #state{adapter_info = I}) -> I#amqp_adapter_info.port;
 i(peer_host,    #state{adapter_info = I}) -> I#amqp_adapter_info.peer_host;
 i(peer_port,    #state{adapter_info = I}) -> I#amqp_adapter_info.peer_port;
 i(name,         #state{adapter_info = I}) -> I#amqp_adapter_info.name;
+i(internal_user, #state{user = U}) -> U;
 
 i(Item, _State) -> throw({bad_argument, Item}).
 
@@ -193,10 +194,14 @@ maybe_ssl_info(Sock) ->
 ssl_info(Sock) ->
     {Protocol, KeyExchange, Cipher, Hash} =
         case rabbit_net:ssl_info(Sock) of
-            {ok, Infos} -> {_, P}         = lists:keyfind(protocol, 1, Infos),
-                           {_, {K, C, H}} = lists:keyfind(cipher_suite, 1, Infos),
-                           {P, K, C, H};
-            _           -> {unknown, unknown, unknown, unknown}
+            {ok, Infos} ->
+                {_, P} = lists:keyfind(protocol, 1, Infos),
+                case lists:keyfind(cipher_suite, 1, Infos) of
+                    {_,{K, C, H}}    -> {P, K, C, H};
+                    {_,{K, C, H, _}} -> {P, K, C, H}
+                end;
+            _           ->
+                {unknown, unknown, unknown, unknown}
         end,
     [{ssl_protocol,     Protocol},
      {ssl_key_exchange, KeyExchange},
index 34b14239f1297b3b95ea6e70e03e0572138943e7..793bfa3b866c1c2caac473511af17babd0753c8b 100644 (file)
@@ -93,6 +93,9 @@ handle_deliver(M, A, DeliveryCtx, C) ->
 
 
 %% @private
+handle_info({'DOWN', _MRef, process, C, normal}, C) ->
+    %% The channel was closed.
+    {ok, C};
 handle_info({'DOWN', _MRef, process, C, Info}, C) ->
     {error, {consumer_died, Info}, C};
 handle_info({'DOWN', MRef, process, Pid, Info}, C) ->
index c2fa89dae79a22135b4d4e6a72132e9712002cfc..1a02981bfb450162b979648e750cac8e5b23c8b0 100644 (file)
@@ -21,7 +21,7 @@
 
 -behaviour(supervisor2).
 
--export([start_link/0, start_connection_sup/1]).
+-export([start_link/0, is_ready/0, start_connection_sup/1]).
 -export([init/1]).
 
 %%---------------------------------------------------------------------------
@@ -31,6 +31,9 @@
 start_link() ->
     supervisor2:start_link({local, amqp_sup}, ?MODULE, []).
 
+is_ready() ->
+    whereis(amqp_sup) =/= undefined.
+
 start_connection_sup(AmqpParams) ->
     supervisor2:start_child(amqp_sup, [AmqpParams]).
 
diff --git a/rabbitmq-server/deps/amqp_client/src/rabbit_ct_client_helpers.erl b/rabbitmq-server/deps/amqp_client/src/rabbit_ct_client_helpers.erl
new file mode 100644 (file)
index 0000000..fee4ea2
--- /dev/null
@@ -0,0 +1,243 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_ct_client_helpers).
+
+-include_lib("common_test/include/ct.hrl").
+-include("include/amqp_client.hrl").
+
+-export([
+    setup_steps/0,
+    teardown_steps/0,
+    start_channels_managers/1,
+    stop_channels_managers/1,
+
+    open_connection/2,
+    open_unmanaged_connection/1, open_unmanaged_connection/2,
+    close_connection/1,
+    open_channel/2, open_channel/1,
+    close_channel/1,
+    open_connection_and_channel/2, open_connection_and_channel/1,
+    close_connection_and_channel/2,
+    close_channels_and_connection/2,
+
+    publish/3, consume/3, fetch/3
+  ]).
+
+%% -------------------------------------------------------------------
+%% Client setup/teardown steps.
+%% -------------------------------------------------------------------
+
+setup_steps() ->
+    [
+      fun start_channels_managers/1
+    ].
+
+teardown_steps() ->
+    [
+      fun stop_channels_managers/1
+    ].
+
+start_channels_managers(Config) ->
+    NodeConfigs = rabbit_ct_broker_helpers:get_node_configs(Config),
+    NodeConfigs1 = [start_channels_manager(NC) || NC <- NodeConfigs],
+    rabbit_ct_helpers:set_config(Config, {rmq_nodes, NodeConfigs1}).
+
+start_channels_manager(NodeConfig) ->
+    Pid = erlang:spawn(
+      fun() -> channels_manager(NodeConfig, undefined, []) end),
+    rabbit_ct_helpers:set_config(NodeConfig, {channels_manager, Pid}).
+
+stop_channels_managers(Config) ->
+    NodeConfigs = rabbit_ct_broker_helpers:get_node_configs(Config),
+    NodeConfigs1 = [stop_channels_manager(NC) || NC <- NodeConfigs],
+    rabbit_ct_helpers:set_config(Config, {rmq_nodes, NodeConfigs1}).
+
+stop_channels_manager(NodeConfig) ->
+    Pid = ?config(channels_manager, NodeConfig),
+    Pid ! stop,
+    proplists:delete(channels_manager, NodeConfig).
+
+channels_manager(NodeConfig, ConnTuple, Channels) ->
+    receive
+        {open_connection, From} ->
+            {Conn1, _} = ConnTuple1 = open_conn(NodeConfig, ConnTuple),
+            From ! Conn1,
+            channels_manager(NodeConfig, ConnTuple1, Channels);
+        {open_channel, From} ->
+            {Conn1, _} = ConnTuple1 = open_conn(NodeConfig, ConnTuple),
+            {ok, Ch} = amqp_connection:open_channel(Conn1),
+            ChMRef = erlang:monitor(process, Ch),
+            From ! Ch,
+            channels_manager(NodeConfig, ConnTuple1,
+              [{Ch, ChMRef} | Channels]);
+        {close_everything, From} ->
+            close_everything(ConnTuple, Channels),
+            From ! ok,
+            channels_manager(NodeConfig, undefined, []);
+        {'DOWN', ConnMRef, process, Conn, _}
+        when {Conn, ConnMRef} =:= ConnTuple ->
+            channels_manager(NodeConfig, undefined, Channels);
+        {'DOWN', ChMRef, process, Ch, _} ->
+            Channels1 = Channels -- [{Ch, ChMRef}],
+            channels_manager(NodeConfig, ConnTuple, Channels1);
+        stop ->
+            close_everything(ConnTuple, Channels);
+        Unhandled ->
+            ct:pal(?LOW_IMPORTANCE,
+              "Channels manager ~p: unhandled message: ~p",
+              [self(), Unhandled]),
+            channels_manager(NodeConfig, ConnTuple, Channels)
+    end.
+
+open_conn(NodeConfig, undefined) ->
+    Port = ?config(tcp_port_amqp, NodeConfig),
+    Params = #amqp_params_network{port = Port},
+    {ok, Conn} = amqp_connection:start(Params),
+    MRef = erlang:monitor(process, Conn),
+    {Conn, MRef};
+open_conn(NodeConfig, {Conn, _} = ConnTuple) ->
+    case erlang:is_process_alive(Conn) of
+        true  -> ConnTuple;
+        false -> open_conn(NodeConfig, undefined)
+    end.
+
+close_everything(Conn, [{Ch, MRef} | Rest]) ->
+    case erlang:is_process_alive(Ch) of
+        true ->
+            erlang:demonitor(MRef, [flush]),
+            amqp_channel:close(Ch);
+        false ->
+            ok
+    end,
+    close_everything(Conn, Rest);
+close_everything({Conn, MRef}, []) ->
+    case erlang:is_process_alive(Conn) of
+        true ->
+            erlang:demonitor(MRef),
+            amqp_connection:close(Conn);
+        false ->
+            ok
+    end;
+close_everything(undefined, []) ->
+    ok.
+
+%% -------------------------------------------------------------------
+%% Public API.
+%% -------------------------------------------------------------------
+
+open_connection(Config, Node) ->
+    Pid = rabbit_ct_broker_helpers:get_node_config(Config, Node,
+      channels_manager),
+    Pid ! {open_connection, self()},
+    receive
+        Conn when is_pid(Conn) -> Conn
+    end.
+
+open_unmanaged_connection(Config) ->
+    open_unmanaged_connection(Config, 0).
+
+open_unmanaged_connection(Config, Node) ->
+    open_unmanaged_connection(Config, Node, <<"/">>).
+
+open_unmanaged_connection(Config, Node, VHost) ->
+    Port = rabbit_ct_broker_helpers:get_node_config(Config, Node,
+      tcp_port_amqp),
+    Params = #amqp_params_network{port = Port, virtual_host = VHost},
+    case amqp_connection:start(Params) of
+        {ok, Conn}         -> Conn;
+        {error, _} = Error -> Error
+    end.
+
+open_channel(Config) ->
+    open_channel(Config, 0).
+
+open_channel(Config, Node) ->
+    Pid = rabbit_ct_broker_helpers:get_node_config(Config, Node,
+      channels_manager),
+    Pid ! {open_channel, self()},
+    receive
+        Ch when is_pid(Ch) -> Ch
+    end.
+
+open_connection_and_channel(Config) ->
+    open_connection_and_channel(Config, 0).
+
+open_connection_and_channel(Config, Node) ->
+    Conn = open_connection(Config, Node),
+    Ch = open_channel(Config, Node),
+    {Conn, Ch}.
+
+close_channel(Ch) ->
+    case is_process_alive(Ch) of
+        true  -> amqp_channel:close(Ch);
+        false -> ok
+    end.
+
+close_connection(Conn) ->
+    case is_process_alive(Conn) of
+        true  -> amqp_connection:close(Conn);
+        false -> ok
+    end.
+
+close_connection_and_channel(Conn, Ch) ->
+    _ = close_channel(Ch),
+    case close_connection(Conn) of
+        ok      -> ok;
+        closing -> ok
+    end.
+
+close_channels_and_connection(Config, Node) ->
+    Pid = rabbit_ct_broker_helpers:get_node_config(Config, Node,
+      channels_manager),
+    Pid ! {close_everything, self()},
+    receive
+        ok -> ok
+    end.
+
+publish(Ch, QName, Count) ->
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    [amqp_channel:call(Ch,
+                       #'basic.publish'{routing_key = QName},
+                       #amqp_msg{props   = #'P_basic'{delivery_mode = 2},
+                                 payload = list_to_binary(integer_to_list(I))})
+     || I <- lists:seq(1, Count)],
+    amqp_channel:wait_for_confirms(Ch).
+
+consume(Ch, QName, Count) ->
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue = QName, no_ack = true},
+                           self()),
+    CTag = receive #'basic.consume_ok'{consumer_tag = C} -> C end,
+    [begin
+         Exp = list_to_binary(integer_to_list(I)),
+         receive {#'basic.deliver'{consumer_tag = CTag},
+                  #amqp_msg{payload = Exp}} ->
+                 ok
+         after 500 ->
+                 exit(timeout)
+         end
+     end|| I <- lists:seq(1, Count)],
+    #'queue.declare_ok'{message_count = 0}
+        = amqp_channel:call(Ch, #'queue.declare'{queue   = QName,
+                                                 durable = true}),
+    amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag}),
+    ok.
+
+fetch(Ch, QName, Count) ->
+    [{#'basic.get_ok'{}, _} =
+         amqp_channel:call(Ch, #'basic.get'{queue = QName}) ||
+        _ <- lists:seq(1, Count)],
+    ok.
diff --git a/rabbitmq-server/deps/amqp_client/test.mk b/rabbitmq-server/deps/amqp_client/test.mk
deleted file mode 100644 (file)
index 8c5825e..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-# The contents of this file are subject to the Mozilla Public License
-# Version 1.1 (the "License"); you may not use this file except in
-# compliance with the License. You may obtain a copy of the License at
-# http://www.mozilla.org/MPL/
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-# License for the specific language governing rights and limitations
-# under the License.
-#
-# The Original Code is RabbitMQ.
-#
-# The Initial Developer of the Original Code is GoPivotal, Inc.
-# Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
-#
-
-IS_SUCCESS:=egrep -E "(All .+ tests (successful|passed).|Test passed.)"
-TESTING_MESSAGE:=-eval "error_logger:tty(false), error_logger:logfile({open, \"$(TMPDIR)/erlang-client-tests.log\"}), io:format(\"~nTesting in progress. Please wait...~n~n\")."
-
-NODE_NAME := amqp_client
-RUN := erl -pa test -sname $(NODE_NAME)
-
-MKTEMP=$$(mktemp $(TMPDIR)/tmp.XXXXXXXXXX)
-
-ifdef SSL_CERTS_DIR
-SSL := true
-ALL_SSL := $(MAKE) --no-print-directory test_ssl
-ALL_SSL_COVERAGE := $(MAKE) --no-print-directory test_ssl_coverage
-SSL_BROKER_ARGS := -rabbit ssl_listeners [{\\\"0.0.0.0\\\",5671},{\\\"::1\\\",5671}] \
-       -rabbit ssl_options [{cacertfile,\\\"$(SSL_CERTS_DIR)/testca/cacert.pem\\\"},{certfile,\\\"$(SSL_CERTS_DIR)/server/cert.pem\\\"},{keyfile,\\\"$(SSL_CERTS_DIR)/server/key.pem\\\"},{verify,verify_peer},{fail_if_no_peer_cert,true}]
-SSL_CLIENT_ARGS := -erlang_client_ssl_dir $(SSL_CERTS_DIR)
-else
-SSL := @echo No SSL_CERTS_DIR defined. && false
-ALL_SSL := true
-ALL_SSL_COVERAGE := true
-SSL_BROKER_ARGS :=
-SSL_CLIENT_ARGS :=
-endif
-
-all_tests:
-       $(test_verbose) rm -f failed-$@
-       -$(verbose) $(MAKE) --no-print-directory test_suites || touch failed-$@
-       -$(verbose) $(MAKE) --no-print-directory test_common_package || touch failed-$@
-       -$(verbose) $(MAKE) --no-print-directory test_direct || touch failed-$@
-       $(verbose) ! rm failed-$@ 2>/dev/null
-
-test_suites:
-       $(test_verbose) rm -f failed-$@
-       -$(verbose) $(MAKE) --no-print-directory test_network || touch failed-$@
-       -$(verbose) $(MAKE) --no-print-directory test_remote_direct || touch failed-$@
-       -$(verbose) $(ALL_SSL) || touch failed-$@
-       $(verbose) ! rm failed-$@ 2>/dev/null
-
-test_suites_coverage:
-       $(test_verbose) rm -f failed-$@
-       -$(verbose) $(MAKE) --no-print-directory test_network_coverage || touch failed-$@
-       -$(verbose) $(MAKE) --no-print-directory test_direct_coverage || touch failed-$@
-       $(ALL_SSL_COVERAGE) || touch failed-$@
-       $(verbose) ! rm failed-$@ 2>/dev/null
-
-## Starts a broker, configures users and runs the tests on the same node
-run_test_in_broker:
-       $(verbose) $(MAKE) --no-print-directory start_test_broker_node
-       $(verbose) $(MAKE) --no-print-directory unboot_broker
-       $(verbose) rm -f failed-$@
-       -$(verbose) TMPFILE=$(MKTEMP) && \
-               ( echo "Redirecting output to $$TMPFILE" && \
-               $(MAKE) --no-print-directory run-node \
-               RABBITMQ_SERVER_START_ARGS="-pa test $(SSL_BROKER_ARGS) \
-               -noshell -s rabbit $(RUN_TEST_ARGS) -s init stop" 2>&1 | \
-               tee $$TMPFILE && \
-               $(IS_SUCCESS) $$TMPFILE ) || touch failed-$@; \
-               rm $$TMPFILE
-       -$(verbose) $(MAKE) --no-print-directory boot_broker || touch failed-$@
-       -$(verbose) $(MAKE) --no-print-directory stop_test_broker_node || touch failed-$@
-       $(verbose) ! rm failed-$@ 2>/dev/null
-
-## Starts a broker, configures users and runs the tests from a different node
-run_test_detached: start_test_broker_node
-       $(verbose) rm -f failed-$@
-       -$(verbose) TMPFILE=$(MKTEMP) && \
-               ( echo "Redirecting output to $$TMPFILE" && \
-               MAKE=$(MAKE) \
-               ERL_LIBS='$(CURDIR)/$(DIST_DIR):$(DIST_ERL_LIBS)' \
-               $(RUN) -noinput $(TESTING_MESSAGE) \
-               $(SSL_CLIENT_ARGS) $(RUN_TEST_ARGS) -s init stop 2>&1 | \
-               tee $$TMPFILE && \
-               $(IS_SUCCESS) $$TMPFILE ) || touch failed-$@; \
-               rm $$TMPFILE
-       -$(verbose) $(MAKE) --no-print-directory stop_test_broker_node || touch failed-$@
-       $(verbose) ! rm failed-$@ 2>/dev/null
-
-## Starts a broker, configures users and runs the tests from a different node
-run_test_foreground: start_test_broker_node
-       $(verbose) rm -f failed-$@
-       -$(verbose) MAKE=$(MAKE) $(RUN) -noinput $(TESTING_MESSAGE) \
-          $(SSL_CLIENT_ARGS) $(RUN_TEST_ARGS) -s init stop || touch failed-$@
-       -$(verbose) $(MAKE) --no-print-directory stop_test_broker_node || touch failed-$@
-       $(verbose) ! rm failed-$@ 2>/dev/null
-
-start_test_broker_node: boot_broker
-       $(exec_verbose) sleep 1
-       $(verbose) $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) delete_user test_user_no_perm || :
-       $(verbose) $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) add_user test_user_no_perm test_user_no_perm
-       $(verbose) sleep 1
-
-stop_test_broker_node:
-       $(exec_verbose) sleep 1
-       -$(verbose) $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) delete_user test_user_no_perm
-       $(verbose) $(MAKE) --no-print-directory unboot_broker
-
-boot_broker: virgin-test-tmpdir
-       $(exec_verbose) $(MAKE) --no-print-directory start-background-node \
-               RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS) \
-               $(SSL_BROKER_ARGS)"
-       $(verbose) $(MAKE) --no-print-directory start-rabbit-on-node
-
-unboot_broker:
-       $(exec_verbose) $(MAKE) --no-print-directory stop-rabbit-on-node
-       $(verbose) $(MAKE) --no-print-directory stop-node
-
-ssl:
-       $(verbose) $(SSL)
-
-test_ssl: test-dist ssl
-       $(test_verbose) $(MAKE) --no-print-directory run_test_detached \
-               AMQP_CLIENT_TEST_CONNECTION_TYPE="network_ssl" \
-               RUN_TEST_ARGS="-s amqp_client_SUITE test"
-
-test_network: test-dist
-       $(test_verbose) $(MAKE) --no-print-directory run_test_detached \
-               AMQP_CLIENT_TEST_CONNECTION_TYPE="network" \
-               RUN_TEST_ARGS="-s amqp_client_SUITE test"
-
-test_direct: test-dist
-       $(test_verbose) $(MAKE) --no-print-directory run_test_in_broker \
-               AMQP_CLIENT_TEST_CONNECTION_TYPE="direct" \
-               RUN_TEST_ARGS="-s amqp_client_SUITE test"
-
-test_remote_direct: test-dist
-       $(test_verbose) $(MAKE) --no-print-directory run_test_detached \
-               AMQP_CLIENT_TEST_CONNECTION_TYPE="direct" \
-               RUN_TEST_ARGS="-s amqp_client_SUITE test"
-
-test_common_package: test-dist
-       $(test_verbose) $(MAKE) --no-print-directory run_test_detached \
-               RUN="erl -pa test" \
-               AMQP_CLIENT_TEST_CONNECTION_TYPE="network" \
-               RUN_TEST_ARGS="-s amqp_client_SUITE test"
-       $(verbose) $(MAKE) --no-print-directory run_test_detached \
-               RUN="erl -pa test -sname amqp_client" \
-               AMQP_CLIENT_TEST_CONNECTION_TYPE="direct" \
-               RUN_TEST_ARGS="-s amqp_client_SUITE test"
-
-test_ssl_coverage: test-dist ssl
-       $(test_verbose) $(MAKE) --no-print-directory run_test_detached \
-               AMQP_CLIENT_TEST_CONNECTION_TYPE="network_ssl" \
-               RUN_TEST_ARGS="-s amqp_client_SUITE test_coverage"
-
-test_network_coverage: test-dist
-       $(test_verbose) $(MAKE) --no-print-directory run_test_detached \
-               AMQP_CLIENT_TEST_CONNECTION_TYPE="network" \
-               RUN_TEST_ARGS="-s amqp_client_SUITE test_coverage"
-
-test_remote_direct_coverage: test-dist
-       $(test_verbose) $(MAKE) --no-print-directory run_test_detached \
-               AMQP_CLIENT_TEST_CONNECTION_TYPE="direct" \
-               RUN_TEST_ARGS="-s amqp_client_SUITE test_coverage"
-
-test_direct_coverage: test-dist
-       $(test_verbose) $(MAKE) --no-print-directory run_test_in_broker \
-               AMQP_CLIENT_TEST_CONNECTION_TYPE="direct" \
-               RUN_TEST_ARGS="-s amqp_client_SUITE test_coverage"
diff --git a/rabbitmq-server/deps/amqp_client/test/Makefile b/rabbitmq-server/deps/amqp_client/test/Makefile
deleted file mode 100644 (file)
index beef64c..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-# The contents of this file are subject to the Mozilla Public License
-# Version 1.1 (the "License"); you may not use this file except in
-# compliance with the License. You may obtain a copy of the License at
-# http://www.mozilla.org/MPL/
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-# License for the specific language governing rights and limitations
-# under the License.
-#
-# The Original Code is RabbitMQ.
-#
-# The Initial Developer of the Original Code is GoPivotal, Inc.
-# Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
-#
-
-TEST_SOURCES=$(wildcard *.erl)
-TEST_TARGETS=$(patsubst %.erl, %.beam, $(TEST_SOURCES))
-INCLUDES=$(wildcard ../$(INCLUDE_DIR)/*.hrl)
-DEPS_DIR=../deps
-
-ERLC_OPTS=-I ../$(INCLUDE_DIR) -o ./ -Wall -v +debug_info
-LIBS_PATH=ERL_LIBS=$(DEPS_DIR)
-
-all: compile
-
-compile: $(TEST_TARGETS)
-
-%.beam: %.erl $(DEPS_DIR)/$(COMMON_PACKAGE_DIR) $(INCLUDES)
-       $(LIBS_PATH) erlc $(ERLC_OPTS) $<
-
-clean:
-       rm -f *.beam
diff --git a/rabbitmq-server/deps/amqp_client/test/amqp_client_SUITE.erl b/rabbitmq-server/deps/amqp_client/test/amqp_client_SUITE.erl
deleted file mode 100644 (file)
index aaa7f94..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(amqp_client_SUITE).
-
--export([test_coverage/0]).
-
--include_lib("eunit/include/eunit.hrl").
-
--define(FUNCTION,
-        begin
-            catch throw(x),
-            Fun = case erlang:get_stacktrace() of
-                      [{_, F, _}    | _] -> F; %% < R15
-                      [{_, F, _, _} | _] -> F %% >= R15
-                  end,
-            list_to_atom(string:strip(atom_to_list(Fun), right, $_))
-        end).
-
--define(RUN(Props), run(?FUNCTION, Props)).
-
-%%---------------------------------------------------------------------------
-%% Tests
-%%---------------------------------------------------------------------------
-
-amqp_uri_parse_test_()                  -> ?RUN([]).
-route_destination_test_()               -> ?RUN([]).
-basic_get_test_()                       -> ?RUN([]).
-basic_get_ipv6_test_()                  -> ?RUN([]).
-basic_return_test_()                    -> ?RUN([]).
-simultaneous_close_test_()              -> ?RUN([repeat]).
-basic_qos_test_()                       -> ?RUN([]).
-basic_recover_test_()                   -> ?RUN([]).
-basic_consume_test_()                   -> ?RUN([]).
-consume_notification_test_()            -> ?RUN([]).
-basic_nack_test_()                      -> ?RUN([]).
-large_content_test_()                   -> ?RUN([]).
-lifecycle_test_()                       -> ?RUN([]).
-direct_no_user_test_()                  -> ?RUN([]).
-direct_no_password_test_()              -> ?RUN([]).
-direct_no_vhost_test_()                 -> ?RUN([]).
-network_no_vhost_test_()                -> ?RUN([]).
-nowait_exchange_declare_test_()         -> ?RUN([]).
-channel_repeat_open_close_test_()       -> ?RUN([]).
-channel_multi_open_close_test_()        -> ?RUN([]).
-basic_ack_test_()                       -> ?RUN([]).
-basic_ack_call_test_()                  -> ?RUN([]).
-channel_lifecycle_test_()               -> ?RUN([]).
-queue_unbind_test_()                    -> ?RUN([]).
-sync_method_serialization_test_()       -> ?RUN([]).
-async_sync_method_serialization_test_() -> ?RUN([]).
-sync_async_method_serialization_test_() -> ?RUN([]).
-teardown_test_()                        -> ?RUN([repeat]).
-rpc_test_()                             -> ?RUN([]).
-rpc_client_test_()                      -> ?RUN([]).
-pub_and_close_test_()                   -> ?RUN([]).
-channel_tune_negotiation_test_()        -> ?RUN([]).
-confirm_test_()                         -> ?RUN([]).
-confirm_barrier_test_()                 -> ?RUN([]).
-confirm_select_before_wait_test_()      -> ?RUN([]).
-confirm_barrier_timeout_test_()         -> ?RUN([]).
-confirm_barrier_die_timeout_test_()     -> ?RUN([]).
-default_consumer_test_()                -> ?RUN([]).
-subscribe_nowait_test_()                -> ?RUN([]).
-connection_blocked_network_test_()      -> ?RUN([]).
-
-non_existent_exchange_test_()           -> ?RUN([negative]).
-bogus_rpc_test_()                       -> ?RUN([negative, repeat]).
-hard_error_test_()                      -> ?RUN([negative, repeat]).
-non_existent_user_test_()               -> ?RUN([negative]).
-invalid_password_test_()                -> ?RUN([negative]).
-non_existent_vhost_test_()              -> ?RUN([negative]).
-no_permission_test_()                   -> ?RUN([negative]).
-channel_writer_death_test_()            -> ?RUN([negative]).
-connection_failure_test_()              -> ?RUN([negative]).
-channel_death_test_()                   -> ?RUN([negative]).
-shortstr_overflow_property_test_()      -> ?RUN([negative]).
-shortstr_overflow_field_test_()         -> ?RUN([negative]).
-command_invalid_over_channel_test_()    -> ?RUN([negative]).
-command_invalid_over_channel0_test_()   -> ?RUN([negative]).
-
-%%---------------------------------------------------------------------------
-%% Internal
-%%---------------------------------------------------------------------------
-
-run(TestName, Props) ->
-    RepeatCount = case proplists:get_value(repeat, Props, false) of
-                      true                          -> 100;
-                      Number when is_number(Number) -> Number;
-                      false                         -> 1
-                  end,
-    Module = case proplists:get_bool(negative, Props) of
-                 true  -> negative_test_util;
-                 false -> test_util
-             end,
-    {timeout, proplists:get_value(timeout, Props, 60),
-     fun () ->
-             lists:foreach(
-                 fun (_) ->
-                         try erlang:apply(Module, TestName, []) of
-                             Ret -> Ret
-                         catch
-                             exit:normal -> ok
-                         end
-                 end, lists:seq(1, RepeatCount))
-     end}.
-
-%%---------------------------------------------------------------------------
-%% Coverage
-%%---------------------------------------------------------------------------
-
-test_coverage() ->
-    rabbit_misc:enable_cover(),
-    test(),
-    rabbit_misc:report_cover().
diff --git a/rabbitmq-server/deps/amqp_client/test/amqp_dbg.erl b/rabbitmq-server/deps/amqp_client/test/amqp_dbg.erl
deleted file mode 100644 (file)
index 6bd9b07..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(amqp_dbg).
-
--include_lib("stdlib/include/ms_transform.hrl").
-
--export([tracer/0, all/0, c_all/0]).
--export([supervision/0, c_supervision/0,
-         connection_lifecycle/0, c_connection_lifecycle/0,
-         channels_manager_lifecycle/0, c_channels_manager_lifecycle/0,
-         channel_lifecycle/0, c_channel_lifecycle/0,
-         methods/0, c_methods/0]).
-
-
-tracer() ->
-    Ret = dbg:tracer(),
-    {ok, _} = dbg:p(all, c),
-    Ret.
-
-all() ->
-    tpl_list(all_args()).
-
-c_all() ->
-    ctpl_list(all_args()).
-
-supervision() ->
-    tpl_list(sup_args()).
-
-c_supervision() ->
-    ctpl_list(sup_args()).
-
-connection_lifecycle() ->
-    tpl_list(cl_args()).
-
-c_connection_lifecycle() ->
-    ctpl_list(cl_args()).
-
-channels_manager_lifecycle() ->
-    tpl_list(cml_args()).
-
-c_channels_manager_lifecycle() ->
-    ctpl_list(cml_args()).
-
-channel_lifecycle() ->
-    tpl_list(cl_args()).
-
-c_channel_lifecycle() ->
-    ctpl_list(cl_args()).
-
-methods() ->
-    tpl_list(m_args()).
-
-c_methods() ->
-    ctpl_list(m_args()).
-
-%%---------------------------------------------------------------------------
-%% Internal plumbing
-%%---------------------------------------------------------------------------
-
-all_args() ->
-    sup_args() ++ ncl_args() ++ cml_args() ++ cl_args() ++
-        m_args().
-
-sup_args() ->
-    [{amqp_connection_sup, start_link, return_ms()},
-     {amqp_connection_type_sup, start_link, return_ms()},
-     {amqp_channel_sup_sup, start_link, return_ms()},
-     {amqp_channel_sup_sup, start_channel_sup, return_ms()},
-     {amqp_channel_sup, start_link, return_ms()},
-     {amqp_network_connection, start_infrastructure, return_ms()},
-     {amqp_network_connection, start_heartbeat, return_ms()},
-     {amqp_channel, start_writer, return_ms()}].
-
-ncl_args() ->
-    [{amqp_main_reader, start_link, return_ms()},
-     {amqp_gen_connection, set_closing_state, []},
-     {amqp_gen_connection, handle_channels_terminated, []},
-     {amqp_network_connection, connect, []},
-     {amqp_direct_connection, connect, []},
-     {amqp_gen_connection, terminate, []}].
-
-cml_args() ->
-     [{amqp_channels_manager, handle_open_channel, return_ms()},
-      {amqp_channels_manager, handle_channel_down, []},
-      {amqp_channels_manager, signal_channels_connection_closing, []}].
-
-cl_args() ->
-    [{amqp_channel, init, []},
-     {amqp_channel_util, open_channel, []},
-     {amqp_channel, terminate, []}].
-
-m_args() ->
-    [{amqp_channel, do, return_ms()},
-     {amqp_channel, handle_method, []},
-     {amqp_gen_connection, handle_method, []},
-     {amqp_network_connection, do, return_ms()},
-     {amqp_network_connection, handshake_recv, return_ms()}].
-
-tpl_list(ArgsList) ->
-    [{ok, _} = dbg:tpl(Module, Func, Ms) || {Module, Func, Ms} <- ArgsList],
-    ok.
-
-ctpl_list(ArgsList) ->
-    [{ok, _} = dbg:ctpl(Module, Func) || {Module, Func, _} <- ArgsList],
-    ok.
-
-return_ms() ->
-    dbg:fun2ms(fun(_) -> return_trace() end).
diff --git a/rabbitmq-server/deps/amqp_client/test/negative_test_util.erl b/rabbitmq-server/deps/amqp_client/test/negative_test_util.erl
deleted file mode 100644 (file)
index 945ff7d..0000000
+++ /dev/null
@@ -1,215 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(negative_test_util).
-
--include("amqp_client_internal.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--compile(export_all).
-
-non_existent_exchange_test() ->
-    {ok, Connection} = test_util:new_connection(),
-    X = <<"test">>,
-    RoutingKey = <<"a">>,
-    Payload = <<"foobar">>,
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    {ok, OtherChannel} = amqp_connection:open_channel(Connection),
-    amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
-
-    %% Deliberately mix up the routingkey and exchange arguments
-    Publish = #'basic.publish'{exchange = RoutingKey, routing_key = X},
-    amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
-    test_util:wait_for_death(Channel),
-
-    %% Make sure Connection and OtherChannel still serve us and are not dead
-    {ok, _} = amqp_connection:open_channel(Connection),
-    amqp_channel:call(OtherChannel, #'exchange.delete'{exchange = X}),
-    amqp_connection:close(Connection).
-
-bogus_rpc_test() ->
-    {ok, Connection} = test_util:new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    %% Deliberately bind to a non-existent queue
-    Bind = #'queue.bind'{exchange    = <<"amq.topic">>,
-                         queue       = <<"does-not-exist">>,
-                         routing_key = <<>>},
-    try amqp_channel:call(Channel, Bind) of
-        _ -> exit(expected_to_exit)
-    catch
-        exit:{{shutdown, {server_initiated_close, Code, _}},_} ->
-            ?assertMatch(?NOT_FOUND, Code)
-    end,
-    test_util:wait_for_death(Channel),
-    ?assertMatch(true, is_process_alive(Connection)),
-    amqp_connection:close(Connection).
-
-hard_error_test() ->
-    {ok, Connection} = test_util:new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    {ok, OtherChannel} = amqp_connection:open_channel(Connection),
-    OtherChannelMonitor = erlang:monitor(process, OtherChannel),
-    Qos = #'basic.qos'{prefetch_size = 10000000},
-    try amqp_channel:call(Channel, Qos) of
-        _ -> exit(expected_to_exit)
-    catch
-        exit:{{shutdown, {connection_closing,
-                          {server_initiated_close, ?NOT_IMPLEMENTED, _}}}, _} ->
-            ok
-    end,
-    receive
-        {'DOWN', OtherChannelMonitor, process, OtherChannel, OtherExit} ->
-            ?assertMatch({shutdown,
-                          {connection_closing,
-                           {server_initiated_close, ?NOT_IMPLEMENTED, _}}},
-                         OtherExit)
-    end,
-    test_util:wait_for_death(Channel),
-    test_util:wait_for_death(Connection).
-
-%% The connection should die if the underlying connection is prematurely
-%% closed. For a network connection, this means that the TCP socket is
-%% closed. For a direct connection (remotely only, of course), this means that
-%% the RabbitMQ node appears as down.
-connection_failure_test() ->
-    {ok, Connection} = test_util:new_connection(),
-    case amqp_connection:info(Connection, [type, amqp_params]) of
-        [{type, direct}, {amqp_params, Params}]  ->
-            case Params#amqp_params_direct.node of
-                N when N == node() ->
-                    amqp_connection:close(Connection);
-                N ->
-                    true = erlang:disconnect_node(N),
-                    net_adm:ping(N)
-            end;
-        [{type, network}, {amqp_params, _}] ->
-            [{sock, Sock}] = amqp_connection:info(Connection, [sock]),
-            ok = gen_tcp:close(Sock)
-    end,
-    test_util:wait_for_death(Connection),
-    ok.
-
-%% An error in a channel should result in the death of the entire connection.
-%% The death of the channel is caused by an error in generating the frames
-%% (writer dies)
-channel_writer_death_test() ->
-    {ok, Connection} = test_util:new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    Publish = #'basic.publish'{routing_key = <<>>, exchange = <<>>},
-    QoS = #'basic.qos'{prefetch_count = 0},
-    Message = #amqp_msg{props = <<>>, payload = <<>>},
-    amqp_channel:cast(Channel, Publish, Message),
-    ?assertExit(_, amqp_channel:call(Channel, QoS)),
-    test_util:wait_for_death(Channel),
-    test_util:wait_for_death(Connection),
-    ok.
-
-%% An error in the channel process should result in the death of the entire
-%% connection. The death of the channel is caused by making a call with an
-%% invalid message to the channel process
-channel_death_test() ->
-    {ok, Connection} = test_util:new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    ?assertExit(_, amqp_channel:call(Channel, bogus_message)),
-    test_util:wait_for_death(Channel),
-    test_util:wait_for_death(Connection),
-    ok.
-
-%% Attempting to send a shortstr longer than 255 bytes in a property field
-%% should fail - this only applies to the network case
-shortstr_overflow_property_test() ->
-    {ok, Connection} = test_util:new_connection(just_network),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    SentString = << <<"k">> || _ <- lists:seq(1, 340)>>,
-    #'queue.declare_ok'{queue = Q}
-        = amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
-    Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
-    PBasic = #'P_basic'{content_type = SentString},
-    AmqpMsg = #amqp_msg{payload = <<"foobar">>, props = PBasic},
-    QoS = #'basic.qos'{prefetch_count = 0},
-    amqp_channel:cast(Channel, Publish, AmqpMsg),
-    ?assertExit(_, amqp_channel:call(Channel, QoS)),
-    test_util:wait_for_death(Channel),
-    test_util:wait_for_death(Connection),
-    ok.
-
-%% Attempting to send a shortstr longer than 255 bytes in a method's field
-%% should fail - this only applies to the network case
-shortstr_overflow_field_test() ->
-    {ok, Connection} = test_util:new_connection(just_network),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    SentString = << <<"k">> || _ <- lists:seq(1, 340)>>,
-    #'queue.declare_ok'{queue = Q}
-        = amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
-    ?assertExit(_, amqp_channel:call(
-                       Channel, #'basic.consume'{queue = Q,
-                                                 no_ack = true,
-                                                 consumer_tag = SentString})),
-    test_util:wait_for_death(Channel),
-    test_util:wait_for_death(Connection),
-    ok.
-
-%% Simulates a #'connection.open'{} method received on non-zero channel. The
-%% connection is expected to send a '#connection.close{}' to the server with
-%% reply code command_invalid
-command_invalid_over_channel_test() ->
-    {ok, Connection} = test_util:new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    MonitorRef = erlang:monitor(process, Connection),
-    case amqp_connection:info(Connection, [type]) of
-        [{type, direct}]  -> Channel ! {send_command, #'connection.open'{}};
-        [{type, network}] -> gen_server:cast(Channel,
-                                 {method, #'connection.open'{}, none, noflow})
-    end,
-    assert_down_with_error(MonitorRef, command_invalid),
-    ?assertNot(is_process_alive(Channel)),
-    ok.
-
-%% Simulates a #'basic.ack'{} method received on channel zero. The connection
-%% is expected to send a '#connection.close{}' to the server with reply code
-%% command_invalid - this only applies to the network case
-command_invalid_over_channel0_test() ->
-    {ok, Connection} = test_util:new_connection(just_network),
-    gen_server:cast(Connection, {method, #'basic.ack'{}, none, noflow}),
-    MonitorRef = erlang:monitor(process, Connection),
-    assert_down_with_error(MonitorRef, command_invalid),
-    ok.
-
-assert_down_with_error(MonitorRef, CodeAtom) ->
-    receive
-        {'DOWN', MonitorRef, process, _, Reason} ->
-            {shutdown, {server_misbehaved, Code, _}} = Reason,
-            ?assertMatch(CodeAtom, ?PROTOCOL:amqp_exception(Code))
-    after 2000 ->
-        exit(did_not_die)
-    end.
-
-non_existent_user_test() ->
-    Params = [{username, <<"no-user">>}, {password, <<"no-user">>}],
-    ?assertMatch({error, {auth_failure, _}}, test_util:new_connection(Params)).
-
-invalid_password_test() ->
-    Params = [{username, <<"guest">>}, {password, <<"bad">>}],
-    ?assertMatch({error, {auth_failure, _}}, test_util:new_connection(Params)).
-
-non_existent_vhost_test() ->
-    Params = [{virtual_host, <<"oops">>}],
-    ?assertMatch({error, not_allowed}, test_util:new_connection(Params)).
-
-no_permission_test() ->
-    Params = [{username, <<"test_user_no_perm">>},
-              {password, <<"test_user_no_perm">>}],
-    ?assertMatch({error, not_allowed}, test_util:new_connection(Params)).
diff --git a/rabbitmq-server/deps/amqp_client/test/system_SUITE.erl b/rabbitmq-server/deps/amqp_client/test/system_SUITE.erl
new file mode 100644 (file)
index 0000000..e9caeac
--- /dev/null
@@ -0,0 +1,1450 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(system_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-include("amqp_client.hrl").
+-include("amqp_client_internal.hrl").
+
+-compile(export_all).
+
+-define(UNAUTHORIZED_USER, <<"test_user_no_perm">>).
+
+%% The latch constant defines how many processes are spawned in order
+%% to run certain functionality in parallel. It follows the standard
+%% countdown latch pattern.
+-define(LATCH, 100).
+
+%% The wait constant defines how long a consumer waits before it
+%% unsubscribes
+-define(WAIT, 200).
+
+%% How long to wait for a process to die after an expected failure
+-define(PROCESS_EXIT_TIMEOUT, 5000).
+
+all() ->
+    [
+      {group, direct_connection_tests},
+      {group, network_connection_tests}
+    ].
+
+-define(COMMON_PARALLEL_TEST_CASES, [
+    simultaneous_close,
+    basic_recover,
+    basic_consume,
+    consume_notification,
+    basic_nack,
+    large_content,
+    lifecycle,
+    no_vhost,
+    nowait_exchange_declare,
+    channel_repeat_open_close,
+    channel_multi_open_close,
+    basic_ack,
+    basic_ack_call,
+    channel_lifecycle,
+    queue_unbind,
+    sync_method_serialization,
+    async_sync_method_serialization,
+    sync_async_method_serialization,
+    rpc,
+    rpc_client,
+    confirm,
+    confirm_barrier,
+    confirm_select_before_wait,
+    confirm_barrier_timeout,
+    confirm_barrier_die_timeout,
+    default_consumer,
+    subscribe_nowait,
+    non_existent_exchange,
+    non_existent_user,
+    invalid_password,
+    non_existent_vhost,
+    no_permission,
+    channel_writer_death,
+    command_invalid_over_channel,
+    named_connection,
+    {teardown_loop, [{repeat, 100}, parallel], [teardown]},
+    {bogus_rpc_loop, [{repeat, 100}, parallel], [bogus_rpc]},
+    {hard_error_loop, [{repeat, 100}, parallel], [hard_error]}
+  ]).
+-define(COMMON_NON_PARALLEL_TEST_CASES, [
+    basic_qos, %% Not parallel because it's time-based.
+    connection_failure,
+    channel_death
+  ]).
+
+groups() ->
+    [
+      {direct_connection_tests, [], [
+          {parallel_tests, [parallel], [
+              basic_get_direct,
+              no_user,
+              no_password
+              | ?COMMON_PARALLEL_TEST_CASES]},
+          {non_parallel_tests, [], ?COMMON_NON_PARALLEL_TEST_CASES}
+        ]},
+      {network_connection_tests, [], [
+          {parallel_tests, [parallel], [
+              basic_get_ipv4,
+              basic_get_ipv6,
+              basic_get_ipv4_ssl,
+              basic_get_ipv6_ssl,
+              pub_and_close,
+              channel_tune_negotiation,
+              shortstr_overflow_property,
+              shortstr_overflow_field,
+              command_invalid_over_channel0
+              | ?COMMON_PARALLEL_TEST_CASES]},
+          {non_parallel_tests, [], [
+              connection_blocked_network
+              | ?COMMON_NON_PARALLEL_TEST_CASES]}
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config,
+      rabbit_ct_broker_helpers:setup_steps() ++ [
+        fun ensure_amqp_client_srcdir/1,
+        fun create_unauthorized_user/1
+      ]).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config, [
+        fun delete_unauthorized_user/1
+      ] ++ rabbit_ct_broker_helpers:teardown_steps()).
+
+ensure_amqp_client_srcdir(Config) ->
+    rabbit_ct_helpers:ensure_application_srcdir(Config,
+                                                amqp_client, amqp_client).
+
+create_unauthorized_user(Config) ->
+    Cmd = ["add_user", ?UNAUTHORIZED_USER, ?UNAUTHORIZED_USER],
+    case rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd) of
+        {ok, _} -> rabbit_ct_helpers:set_config(
+                  Config,
+                  [{rmq_unauthorized_username, ?UNAUTHORIZED_USER},
+                   {rmq_unauthorized_password, ?UNAUTHORIZED_USER}]);
+        _       -> {skip, "Failed to create unauthorized user"}
+    end.
+
+delete_unauthorized_user(Config) ->
+    Cmd = ["delete_user", ?UNAUTHORIZED_USER],
+    rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd),
+    Config.
+
+%% -------------------------------------------------------------------
+%% Groups.
+%% -------------------------------------------------------------------
+
+init_per_group(direct_connection_tests, Config) ->
+    rabbit_ct_helpers:set_config(Config, {amqp_client_conn_type, direct});
+init_per_group(network_connection_tests, Config) ->
+    rabbit_ct_helpers:set_config(Config, {amqp_client_conn_type, network});
+init_per_group(Group, Config)
+  when Group =:= parallel_tests
+  orelse Group =:= non_parallel_tests
+  orelse Group =:= teardown_loop
+  orelse Group =:= bogus_rpc_loop
+  orelse Group =:= hard_error_loop ->
+    case ?config(amqp_client_conn_type, Config) of
+        undefined -> rabbit_ct_helpers:set_config(
+                       Config, {amqp_client_conn_type, network});
+        _         -> Config
+    end.
+
+end_per_group(_, Config) ->
+    Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+init_per_testcase(Test, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Test),
+    {Username, Password} = case Test of
+        no_user           -> {none,
+                              none};
+        no_password       -> {?config(rmq_username, Config),
+                              none};
+        non_existent_user -> {<<"no-user">>,
+                              <<"no-user">>};
+        invalid_password  -> {?config(rmq_username, Config),
+                              <<"bad">>};
+        no_permission     -> {?config(rmq_unauthorized_username, Config),
+                              ?config(rmq_unauthorized_password, Config)};
+        _                 -> {?config(rmq_username, Config),
+                              ?config(rmq_password, Config)}
+    end,
+    VHost = case Test of
+        no_vhost           -> <<"/noexist">>;
+        non_existent_vhost -> <<"oops">>;
+        _                  -> ?config(rmq_vhost, Config)
+    end,
+    Hostname = case Test of
+        basic_get_ipv4     -> "127.0.0.1";
+        basic_get_ipv6     -> "::1";
+        basic_get_ipv4_ssl -> "127.0.0.1";
+        basic_get_ipv6_ssl -> "::1";
+        _                  -> ?config(rmq_hostname, Config)
+    end,
+    {Port, SSLOpts} = if
+        Test =:= basic_get_ipv4_ssl orelse
+        Test =:= basic_get_ipv6_ssl ->
+            CertsDir = ?config(rmq_certsdir, Config),
+            {
+              rabbit_ct_broker_helpers:get_node_config(Config, 0,
+                tcp_port_amqp_tls),
+              [
+                {cacertfile, filename:join([CertsDir, "testca", "cacert.pem"])},
+                {certfile, filename:join([CertsDir, "client", "cert.pem"])},
+                {keyfile, filename:join([CertsDir, "client", "key.pem"])},
+                {verify, verify_peer},
+                {fail_if_no_peer_cert, true}
+              ]
+            };
+        true ->
+            {
+              rabbit_ct_broker_helpers:get_node_config(Config, 0,
+                tcp_port_amqp),
+              none
+            }
+    end,
+    ChannelMax = case Test of
+        channel_tune_negotiation -> 10;
+        _                        -> ?config(rmq_channel_max, Config)
+    end,
+    ConnParams = case ?config(amqp_client_conn_type, Config) of
+        direct ->
+            #amqp_params_direct{
+              username     = Username,
+              password     = Password,
+              node         = rabbit_ct_broker_helpers:get_node_config(Config,
+                               0, nodename),
+              virtual_host = VHost};
+        network ->
+            #amqp_params_network{
+              username     = Username,
+              password     = Password,
+              host         = Hostname,
+              port         = Port,
+              virtual_host = VHost,
+              channel_max  = ChannelMax,
+              ssl_options  = SSLOpts}
+    end,
+    rabbit_ct_helpers:set_config(Config,
+                                 {amqp_client_conn_params, ConnParams}).
+
+end_per_testcase(Test, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Test).
+
+%% -------------------------------------------------------------------
+
+basic_get_direct(Config)   -> basic_get(Config).
+basic_get_ipv4(Config)     -> basic_get(Config).
+basic_get_ipv6(Config)     -> basic_get(Config).
+basic_get_ipv4_ssl(Config) -> basic_get(Config).
+basic_get_ipv6_ssl(Config) -> basic_get(Config).
+
+basic_get(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    Payload = <<"foobar">>,
+    {ok, Q} = setup_publish(Channel, Payload),
+    get_and_assert_equals(Channel, Q, Payload),
+    get_and_assert_empty(Channel, Q),
+    teardown(Connection, Channel).
+
+named_connection(Config) ->
+    ConnName = <<"Custom Name">>,
+    Params = ?config(amqp_client_conn_params, Config),
+    {ok, Connection} = amqp_connection:start(Params, ConnName),
+    ConnName = amqp_connection:connection_name(Connection),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    Payload = <<"foobar">>,
+    {ok, Q} = setup_publish(Channel, Payload),
+    get_and_assert_equals(Channel, Q, Payload),
+    get_and_assert_empty(Channel, Q),
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+simultaneous_close(Config) ->
+    {ok, Connection} = new_connection(Config),
+    %% We pick a high channel number, to avoid any conflict with other
+    %% tests running in parallel.
+    ChannelNumber = case ?config(rmq_channel_max, Config) of
+        0 -> ?MAX_CHANNEL_NUMBER;
+        N -> N
+    end,
+    {ok, Channel1} = amqp_connection:open_channel(Connection, ChannelNumber),
+
+    %% Publish to non-existent exchange and immediately close channel
+    amqp_channel:cast(Channel1, #'basic.publish'{exchange = <<"does-not-exist">>,
+                                                 routing_key = <<"a">>},
+                               #amqp_msg{payload = <<"foobar">>}),
+    try amqp_channel:close(Channel1) of
+        ok      -> wait_for_death(Channel1);
+        closing -> wait_for_death(Channel1)
+    catch
+        exit:{noproc, _}                                              -> ok;
+        exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _}}, _} -> ok
+    end,
+
+    %% Channel2 (opened with the exact same number as Channel1)
+    %% should not receive a close_ok (which is intended for Channel1)
+    {ok, Channel2} = amqp_connection:open_channel(Connection, ChannelNumber),
+
+    %% Make sure Channel2 functions normally
+    #'exchange.declare_ok'{} =
+        amqp_channel:call(Channel2,
+          #'exchange.declare'{exchange = <<"simultaneous_close">>}),
+    #'exchange.delete_ok'{} =
+        amqp_channel:call(Channel2,
+          #'exchange.delete'{exchange = <<"simultaneous_close">>}),
+
+    teardown(Connection, Channel2).
+
+%% -------------------------------------------------------------------
+
+basic_qos(Config) ->
+    [NoQos, Qos] = [basic_qos_test(Config, Prefetch) || Prefetch <- [0,1]],
+    ExpectedRatio = (1+1) / (1+50/5),
+    FudgeFactor = 2, %% account for timing variations
+    ct:pal(?LOW_IMPORTANCE,
+      "QOS=0 -> ~p (noqos)~n"
+      "QOS=1 -> ~p (qos)~n"
+      "qos / noqos < ~p * ~p = ~p < ~p = ~p~n",
+      [NoQos, Qos, ExpectedRatio, FudgeFactor, Qos / NoQos, ExpectedRatio * FudgeFactor, Qos / NoQos < ExpectedRatio * FudgeFactor]),
+    true = Qos / NoQos < ExpectedRatio * FudgeFactor.
+
+basic_qos_test(Config, Prefetch) ->
+    {ok, Connection} = new_connection(Config),
+    Messages = 100,
+    Workers = [5, 50],
+    Parent = self(),
+    {ok, Chan} = amqp_connection:open_channel(Connection),
+    #'queue.declare_ok'{queue = Q} =
+        amqp_channel:call(Chan, #'queue.declare'{}),
+    Kids = [spawn(
+            fun() ->
+                {ok, Channel} = amqp_connection:open_channel(Connection),
+                amqp_channel:call(Channel,
+                                  #'basic.qos'{prefetch_count = Prefetch}),
+                amqp_channel:call(Channel,
+                                  #'basic.consume'{queue = Q}),
+                Parent ! finished,
+                sleeping_consumer(Channel, Sleep, Parent)
+            end) || Sleep <- Workers],
+    latch_loop(length(Kids)),
+    spawn(fun() -> {ok, Channel} = amqp_connection:open_channel(Connection),
+                   producer_loop(Channel, Q, Messages)
+          end),
+    {Res, _} = timer:tc(erlang, apply, [fun latch_loop/1, [Messages]]),
+    [Kid ! stop || Kid <- Kids],
+    latch_loop(length(Kids)),
+    teardown(Connection, Chan),
+    Res.
+
+sleeping_consumer(Channel, Sleep, Parent) ->
+    receive
+        stop ->
+            do_stop(Channel, Parent);
+        #'basic.consume_ok'{} ->
+            sleeping_consumer(Channel, Sleep, Parent);
+        #'basic.cancel_ok'{}  ->
+            exit(unexpected_cancel_ok);
+        {#'basic.deliver'{delivery_tag = DeliveryTag}, _Content} ->
+            Parent ! finished,
+            receive stop -> do_stop(Channel, Parent)
+            after Sleep -> ok
+            end,
+            amqp_channel:cast(Channel,
+                              #'basic.ack'{delivery_tag = DeliveryTag}),
+            sleeping_consumer(Channel, Sleep, Parent)
+    end.
+
+do_stop(Channel, Parent) ->
+    Parent ! finished,
+    amqp_channel:close(Channel),
+    wait_for_death(Channel),
+    exit(normal).
+
+producer_loop(Channel, _RoutingKey, 0) ->
+    amqp_channel:close(Channel),
+    wait_for_death(Channel),
+    ok;
+
+producer_loop(Channel, RoutingKey, N) ->
+    Publish = #'basic.publish'{exchange = <<>>, routing_key = RoutingKey},
+    amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<>>}),
+    producer_loop(Channel, RoutingKey, N - 1).
+
+%% -------------------------------------------------------------------
+
+basic_recover(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(
+                        Connection, {amqp_direct_consumer, [self()]}),
+    #'queue.declare_ok'{queue = Q} =
+        amqp_channel:call(Channel, #'queue.declare'{}),
+    #'basic.consume_ok'{consumer_tag = Tag} =
+        amqp_channel:call(Channel, #'basic.consume'{queue = Q}),
+    receive #'basic.consume_ok'{consumer_tag = Tag} -> ok end,
+    Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+    amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<"foobar">>}),
+    receive
+        {#'basic.deliver'{consumer_tag = Tag}, _} ->
+            %% no_ack set to false, but don't send ack
+            ok
+    end,
+    BasicRecover = #'basic.recover'{requeue = true},
+    amqp_channel:cast(Channel, BasicRecover),
+    receive
+        {#'basic.deliver'{consumer_tag = Tag,
+                          delivery_tag = DeliveryTag2}, _} ->
+            amqp_channel:cast(Channel,
+                              #'basic.ack'{delivery_tag = DeliveryTag2})
+    end,
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+basic_consume(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    X = <<"basic_consume">>,
+    amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
+    RoutingKey = <<"key">>,
+    Parent = self(),
+    [spawn_link(fun () ->
+                        consume_loop(Channel, X, RoutingKey, Parent, <<Tag:32>>)
+                end) || Tag <- lists:seq(1, ?LATCH)],
+    latch_loop(?LATCH),
+    Publish = #'basic.publish'{exchange = X, routing_key = RoutingKey},
+    amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<"foobar">>}),
+    latch_loop(?LATCH),
+    amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
+    teardown(Connection, Channel).
+
+consume_loop(Channel, X, RoutingKey, Parent, Tag) ->
+    #'queue.declare_ok'{queue = Q} =
+        amqp_channel:call(Channel, #'queue.declare'{}),
+    #'queue.bind_ok'{} =
+        amqp_channel:call(Channel, #'queue.bind'{queue = Q,
+                                                 exchange = X,
+                                                 routing_key = RoutingKey}),
+    #'basic.consume_ok'{} =
+        amqp_channel:call(Channel,
+                          #'basic.consume'{queue = Q, consumer_tag = Tag}),
+    receive #'basic.consume_ok'{consumer_tag = Tag} -> ok end,
+    Parent ! finished,
+    receive {#'basic.deliver'{}, _} -> ok end,
+    #'basic.cancel_ok'{} =
+        amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = Tag}),
+    receive #'basic.cancel_ok'{consumer_tag = Tag} -> ok end,
+    Parent ! finished.
+
+%% -------------------------------------------------------------------
+
+%% Deleting a queue that has an active consumer must make the broker push
+%% a basic.cancel notification carrying that consumer's tag.
+consume_notification(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    #'queue.declare_ok'{queue = Q} =
+        amqp_channel:call(Channel, #'queue.declare'{}),
+    #'basic.consume_ok'{consumer_tag = CTag} = ConsumeOk =
+        amqp_channel:call(Channel, #'basic.consume'{queue = Q}),
+    %% ConsumeOk is already bound, so this receive matches the exact
+    %% consume_ok record delivered to the subscriber.
+    receive ConsumeOk -> ok end,
+    #'queue.delete_ok'{} =
+        amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+    receive #'basic.cancel'{consumer_tag = CTag} -> ok end,
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% Publishes one message, fetches it un-acked, then nacks it with
+%% requeue = false. The message must be dropped, so a subsequent get
+%% finds the queue empty.
+basic_nack(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    #'queue.declare_ok'{queue = Q}
+        = amqp_channel:call(Channel, #'queue.declare'{}),
+
+    Payload = <<"m1">>,
+
+    amqp_channel:call(Channel,
+                      #'basic.publish'{exchange = <<>>, routing_key = Q},
+                      #amqp_msg{payload = Payload}),
+
+    #'basic.get_ok'{delivery_tag = Tag} =
+        get_and_assert_equals(Channel, Q, Payload, false),
+
+    amqp_channel:call(Channel, #'basic.nack'{delivery_tag = Tag,
+                                             multiple     = false,
+                                             requeue      = false}),
+
+    get_and_assert_empty(Channel, Q),
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% Round-trips a large payload (1000 random bytes repeated 1000 times,
+%% ~1 MB) through the default exchange and asserts it arrives intact.
+large_content(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    #'queue.declare_ok'{queue = Q}
+        = amqp_channel:call(Channel, #'queue.declare'{}),
+    F = list_to_binary([rand_compat:uniform(256)-1 || _ <- lists:seq(1, 1000)]),
+    Payload = list_to_binary([F || _ <- lists:seq(1, 1000)]),
+    Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+    amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+    get_and_assert_equals(Channel, Q, Payload),
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% Exercises queue/exchange lifecycle under concurrency: ?LATCH worker
+%% processes each declare, bind and delete a queue against one shared
+%% topic exchange (see queue_exchange_binding/4), then the exchange is
+%% deleted once all workers have signalled completion.
+lifecycle(Config) ->
+    {ok, Connection} = new_connection(Config),
+    X = <<"lifecycle">>,
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    amqp_channel:call(Channel,
+                      #'exchange.declare'{exchange = X,
+                                          type = <<"topic">>}),
+    Parent = self(),
+    [spawn(fun () -> queue_exchange_binding(Channel, X, Parent, Tag) end)
+     || Tag <- lists:seq(1, ?LATCH)],
+    latch_loop(?LATCH),
+    amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
+    teardown(Connection, Channel).
+
+%% One lifecycle/1 worker: declare a uniquely-named queue, bind it to X
+%% with a wildcard key, delete it, and signal Parent.
+queue_exchange_binding(Channel, X, Parent, Tag) ->
+    %% Nobody ever sends 'nothing'; this receive is purely a staggered
+    %% delay (timeout depends on Tag) so the workers don't all hit the
+    %% channel at exactly the same moment.
+    receive
+        nothing -> ok
+    after (?LATCH - Tag rem 7) * 10 ->
+        ok
+    end,
+    Q = list_to_binary(rabbit_misc:format("lifecycle.a.b.c.~b", [Tag])),
+    Binding = <<"lifecycle.a.b.c.*">>,
+    #'queue.declare_ok'{queue = Q1}
+        = amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
+    %% Assert the broker echoed back the requested queue name.
+    Q = Q1,
+    Route = #'queue.bind'{queue = Q,
+                          exchange = X,
+                          routing_key = Binding},
+    amqp_channel:call(Channel, Route),
+    amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+    Parent ! finished.
+
+%% -------------------------------------------------------------------
+
+%% Both cases delegate to no_something/1; presumably Config supplies
+%% connection parameters with the user (resp. password) omitted —
+%% confirm against the suite's group/init setup, which is outside this
+%% window.
+no_user(Config)     -> no_something(Config).
+no_password(Config) -> no_something(Config).
+
+%% Shared body for no_user/no_password: the connection must still open
+%% successfully and close cleanly.
+no_something(Config) ->
+    {ok, Connection} = new_connection(Config),
+    amqp_connection:close(Connection),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Connecting to a vhost the user cannot access must be refused with
+%% not_allowed.
+no_vhost(Config) ->
+    {error, not_allowed} = new_connection(Config),
+    ok.
+
+%% -------------------------------------------------------------------
+
+%% With nowait = true the broker sends no declare_ok, so the call
+%% returns plain 'ok' immediately.
+nowait_exchange_declare(Config) ->
+    {ok, Connection} = new_connection(Config),
+    X = <<"nowait_exchange_declare">>,
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    ok = amqp_channel:call(Channel, #'exchange.declare'{exchange = X,
+                                                        type = <<"topic">>,
+                                                        nowait = true}),
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% Sequentially opens and closes 50 channels on one connection; each
+%% close must return ok and the connection must still shut down cleanly.
+channel_repeat_open_close(Config) ->
+    {ok, Connection} = new_connection(Config),
+    lists:foreach(
+        fun(_) ->
+            {ok, Ch} = amqp_connection:open_channel(Connection),
+            ok = amqp_channel:close(Ch)
+        end, lists:seq(1, 50)),
+    amqp_connection:close(Connection),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Concurrently opens and closes 50 channels while the connection is
+%% being torn down. The races are intentional: open/close may observe
+%% the connection already 'closing', or the channel process may already
+%% be gone ({noproc,_}/{normal,_} exits) — all of those are acceptable
+%% outcomes; anything else crashes the linked worker and fails the test.
+channel_multi_open_close(Config) ->
+    {ok, Connection} = new_connection(Config),
+    [spawn_link(
+        fun() ->
+            try amqp_connection:open_channel(Connection) of
+                {ok, Ch}           -> try amqp_channel:close(Ch) of
+                                          ok                 -> ok;
+                                          closing            -> ok
+                                      catch
+                                          exit:{noproc, _} -> ok;
+                                          exit:{normal, _} -> ok
+                                      end;
+                closing            -> ok
+            catch
+                exit:{noproc, _} -> ok;
+                exit:{normal, _} -> ok
+            end
+        end) || _ <- lists:seq(1, 50)],
+    %% yield/0 is only a scheduling hint — the close below deliberately
+    %% races with the workers above.
+    erlang:yield(),
+    amqp_connection:close(Connection),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Fetches a pre-published message un-acked and acknowledges it with an
+%% async cast (compare basic_ack_call/1, which uses a sync call).
+basic_ack(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    {ok, Q} = setup_publish(Channel),
+    {#'basic.get_ok'{delivery_tag = Tag}, _}
+        = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
+    amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag}),
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% Same as basic_ack/1 but issues the ack via amqp_channel:call/2,
+%% checking that a synchronous call with an async-only method works.
+basic_ack_call(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    {ok, Q} = setup_publish(Channel),
+    {#'basic.get_ok'{delivery_tag = Tag}, _}
+        = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
+    amqp_channel:call(Channel, #'basic.ack'{delivery_tag = Tag}),
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% Closing a channel must not harm the connection: a second channel can
+%% still be opened afterwards.
+channel_lifecycle(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    amqp_channel:close(Channel),
+    {ok, Channel2} = amqp_connection:open_channel(Connection),
+    teardown(Connection, Channel2).
+
+%% -------------------------------------------------------------------
+
+%% A message routed through a binding is received; after queue.unbind
+%% the identical publish no longer reaches the queue.
+queue_unbind(Config) ->
+    {ok, Connection} = new_connection(Config),
+    X = <<"queue_unbind-eggs">>,
+    Q = <<"queue_unbind-foobar">>,
+    Key = <<"quay">>,
+    Payload = <<"foobar">>,
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
+    amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
+    Bind = #'queue.bind'{queue = Q,
+                         exchange = X,
+                         routing_key = Key},
+    amqp_channel:call(Channel, Bind),
+    Publish = #'basic.publish'{exchange = X, routing_key = Key},
+    %% Msg is bound here so the exact same message can be re-published
+    %% after the unbind below.
+    amqp_channel:call(Channel, Publish, Msg = #amqp_msg{payload = Payload}),
+    get_and_assert_equals(Channel, Q, Payload),
+    Unbind = #'queue.unbind'{queue = Q,
+                             exchange = X,
+                             routing_key = Key},
+    amqp_channel:call(Channel, Unbind),
+    amqp_channel:call(Channel, Publish, Msg),
+    get_and_assert_empty(Channel, Q),
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% This is designed to exercise the internal queuing mechanism
+%% to ensure that sync methods are properly serialized.
+%% Each of the ?LATCH concurrent processes repeatedly declares an
+%% exclusive queue whose name encodes the op count; the broker echoing
+%% the exact name back (Q = Q1) proves the sync declares were not
+%% interleaved/corrupted on the shared channel.
+sync_method_serialization(Config) ->
+    abstract_method_serialization_test(
+        "sync_method_serialization", Config,
+        fun (_, _) -> ok end,
+        fun (Channel, _, _, _, Count) ->
+                Q = fmt("sync_method_serialization-~p", [Count]),
+                #'queue.declare_ok'{queue = Q1} =
+                    amqp_channel:call(Channel,
+                                      #'queue.declare'{queue     = Q,
+                                                       exclusive = true}),
+                Q = Q1
+        end,
+        fun (_, _, _, _, _) -> ok end).
+
+%% This is designed to exercise the internal queuing mechanism
+%% to ensure that sending async methods and then a sync method is
+%% serialized properly.
+%% Concurrent async publishes followed by a sync bind: since the queue
+%% is only bound *after* all publishes were sent, none of the published
+%% messages may be routed to it (message_count must be 0).
+async_sync_method_serialization(Config) ->
+    abstract_method_serialization_test(
+        "async_sync_method_serialization", Config,
+        fun (Channel, _X) ->
+                #'queue.declare_ok'{queue = Q} =
+                    amqp_channel:call(Channel, #'queue.declare'{}),
+                Q
+        end,
+        fun (Channel, X, Payload, _, _) ->
+                %% The async methods
+                ok = amqp_channel:call(Channel,
+                                       #'basic.publish'{exchange = X,
+                                                        routing_key = <<"a">>},
+                                       #amqp_msg{payload = Payload})
+        end,
+        fun (Channel, X, _, Q, _) ->
+                %% The sync method
+                #'queue.bind_ok'{} =
+                    amqp_channel:call(Channel,
+                                      #'queue.bind'{exchange = X,
+                                                    queue = Q,
+                                                    routing_key = <<"a">>}),
+                %% No message should have been routed
+                #'queue.declare_ok'{message_count = 0} =
+                    amqp_channel:call(Channel,
+                                      #'queue.declare'{queue = Q,
+                                                       passive = true})
+        end).
+
+%% This is designed to exercise the internal queuing mechanism
+%% to ensure that sending sync methods and then an async method is
+%% serialized properly.
+%% Concurrent sync declare + cast bind per process, then one confirmed
+%% publish: because the binds (even though cast) must be ordered after
+%% their declares, every created queue must end up holding exactly one
+%% copy of the message.
+sync_async_method_serialization(Config) ->
+    abstract_method_serialization_test(
+        "sync_async_method_serialization", Config,
+        fun (_, _) -> ok end,
+        fun (Channel, X, _Payload, _, _) ->
+                %% The sync methods (called with cast to resume immediately;
+                %% the order should still be preserved)
+                #'queue.declare_ok'{queue = Q} =
+                    amqp_channel:call(Channel,
+                                      #'queue.declare'{exclusive = true}),
+                amqp_channel:cast(Channel, #'queue.bind'{exchange = X,
+                                                         queue = Q,
+                                                         routing_key= <<"a">>}),
+                Q
+        end,
+        fun (Channel, X, Payload, _, MultiOpRet) ->
+                #'confirm.select_ok'{} = amqp_channel:call(
+                                           Channel, #'confirm.select'{}),
+                ok = amqp_channel:call(Channel,
+                                       #'basic.publish'{exchange = X,
+                                                        routing_key = <<"a">>},
+                                       #amqp_msg{payload = Payload}),
+                %% All queues must have gotten this message
+                true = amqp_channel:wait_for_confirms(Channel),
+                lists:foreach(
+                    fun (Q) ->
+                            #'queue.declare_ok'{message_count = 1} =
+                                amqp_channel:call(
+                                  Channel, #'queue.declare'{queue   = Q,
+                                                            passive = true})
+                    end, lists:flatten(MultiOpRet))
+        end).
+
+%% Shared driver for the three serialization tests above.
+%%   BeforeFun(Channel, X)                      -> BeforeRet, run once;
+%%   MultiOpFun(Channel, X, Payload, BeforeRet, ProcIdx)
+%%       run OpsPerProcess (20) times in each of ?LATCH spawned
+%%       processes, all sharing one channel;
+%%   AfterFun(Channel, X, Payload, BeforeRet, MultiOpRet)
+%%       run once with the collected per-process result lists.
+abstract_method_serialization_test(Test, Config,
+                                   BeforeFun, MultiOpFun, AfterFun) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    X = list_to_binary(Test),
+    Payload = list_to_binary(["x" || _ <- lists:seq(1, 1000)]),
+    OpsPerProcess = 20,
+    #'exchange.declare_ok'{} =
+        amqp_channel:call(Channel, #'exchange.declare'{exchange = X,
+                                                       type = <<"topic">>}),
+    BeforeRet = BeforeFun(Channel, X),
+    Parent = self(),
+    %% I is the spawn index (constant within each process); the inner
+    %% comprehension just repeats the op OpsPerProcess times.
+    [spawn(fun () -> Ret = [MultiOpFun(Channel, X, Payload, BeforeRet, I)
+                            || _ <- lists:seq(1, OpsPerProcess)],
+                   Parent ! {finished, Ret}
+           end) || I <- lists:seq(1, ?LATCH)],
+    MultiOpRet = latch_loop(?LATCH),
+    AfterFun(Channel, X, Payload, BeforeRet, MultiOpRet),
+    amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% Verifies teardown/2 itself: both the channel and connection processes
+%% must be alive before and dead after.
+teardown(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    true = is_process_alive(Channel),
+    true = is_process_alive(Connection),
+    teardown(Connection, Channel),
+    false = is_process_alive(Channel),
+    false = is_process_alive(Connection).
+
+%% -------------------------------------------------------------------
+
+%% This tests whether RPC over AMQP produces the same result as invoking the
+%% same argument against the same underlying gen_server instance.
+%% This tests whether RPC over AMQP produces the same result as invoking the
+%% same argument against the same underlying gen_server instance.
+%% The handler (X + 1) runs on the server side; terms travel as
+%% term_to_binary payloads.
+rpc(Config) ->
+    {ok, Connection} = new_connection(Config),
+    Fun = fun(X) -> X + 1 end,
+    RPCHandler = fun(X) -> term_to_binary(Fun(binary_to_term(X))) end,
+    Q = <<"rpc-test">>,
+    Server = amqp_rpc_server:start(Connection, Q, RPCHandler),
+    Client = amqp_rpc_client:start(Connection, Q),
+    Input = 1,
+    Reply = amqp_rpc_client:call(Client, term_to_binary(Input)),
+    Expected = Fun(Input),
+    DecodedReply = binary_to_term(Reply),
+    Expected = DecodedReply,
+    amqp_rpc_client:stop(Client),
+    amqp_rpc_server:stop(Server),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+    teardown(Connection, Channel).
+
+%% This tests if the RPC continues to generate valid correlation ids
+%% over a series of requests.
+%% This tests if the RPC continues to generate valid correlation ids
+%% over a series of requests. The echo server replies with each request's
+%% correlation id, and every id must decode as valid UTF-8.
+rpc_client(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    Q = <<"rpc-client-test">>,
+    Latch = 255, % enough requests to tickle bad correlation ids
+    %% Start a server to return correlation ids to the client.
+    Server = spawn_link(fun() ->
+                                rpc_correlation_server(Channel, Q)
+                        end),
+    %% Generate a series of RPC requests on the same client.
+    Client = amqp_rpc_client:start(Connection, Q),
+    Parent = self(),
+    [spawn(fun() ->
+                   Reply = amqp_rpc_client:call(Client, <<>>),
+                   Parent ! {finished, Reply}
+           end) || _ <- lists:seq(1, Latch)],
+    %% Verify that the correlation ids are valid UTF-8 strings.
+    CorrelationIds = latch_loop(Latch),
+    [<<_/binary>> = DecodedId
+     || DecodedId <- [unicode:characters_to_binary(Id, utf8)
+                      || Id <- CorrelationIds]],
+    %% Cleanup.
+    Server ! stop,
+    amqp_rpc_client:stop(Client),
+    amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
+    teardown(Connection, Channel).
+
+%% Consumer of RPC requests that replies with the CorrelationId.
+%% Consumer of RPC requests that replies with the CorrelationId.
+%% Declares/consumes Q, loops until told to stop, then cancels the
+%% consumer and unregisters itself as return handler.
+rpc_correlation_server(Channel, Q) ->
+    ok = amqp_channel:register_return_handler(Channel, self()),
+    #'queue.declare_ok'{queue = Q} =
+      amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
+    #'basic.consume_ok'{} =
+      amqp_channel:call(Channel,
+                        #'basic.consume'{queue = Q,
+                                         consumer_tag = <<"server">>}),
+    ok = rpc_client_consume_loop(Channel),
+    #'basic.cancel_ok'{} =
+      amqp_channel:call(Channel,
+                        #'basic.cancel'{consumer_tag = <<"server">>}),
+    ok = amqp_channel:unregister_return_handler(Channel).
+
+%% Request loop for rpc_correlation_server/2: echo each request's
+%% correlation id back to its reply_to queue (mandatory publish), ack
+%% the delivery, and recurse. Exits if idle for 3 s.
+rpc_client_consume_loop(Channel) ->
+    receive
+        stop ->
+            ok;
+        {#'basic.deliver'{delivery_tag = DeliveryTag},
+         #amqp_msg{props = Props}} ->
+            #'P_basic'{correlation_id = CorrelationId,
+                       reply_to = Q} = Props,
+            Properties = #'P_basic'{correlation_id = CorrelationId},
+            Publish = #'basic.publish'{exchange = <<>>,
+                                       routing_key = Q,
+                                       mandatory = true},
+            amqp_channel:call(
+              Channel, Publish, #amqp_msg{props = Properties,
+                                          payload = CorrelationId}),
+            amqp_channel:call(
+              Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
+            rpc_client_consume_loop(Channel);
+        %% NOTE(review): this catch-all silently drops anything else
+        %% (e.g. consume_ok/returned messages) — presumably intentional
+        %% here, but worth confirming it cannot hide a lost request.
+        _ ->
+            rpc_client_consume_loop(Channel)
+    after 3000 ->
+            exit(no_request_received)
+    end.
+
+%% -------------------------------------------------------------------
+
+%% Test for the network client.
+%% Sends a bunch of messages and immediately closes the connection without
+%% closing the channel. Then gets the messages back from the queue and
+%% expects all of them to have been sent.
+%% Test for the network client
+%% Sends a bunch of messages and immediately closes the connection without
+%% closing the channel. Then gets the messages back from the queue and expects
+%% all of them to have been sent.
+pub_and_close(Config) ->
+    {ok, Connection1} = new_connection(Config),
+    Payload = <<"eggs">>,
+    NMessages = 50000,
+    {ok, Channel1} = amqp_connection:open_channel(Connection1),
+    #'queue.declare_ok'{queue = Q} =
+        amqp_channel:call(Channel1, #'queue.declare'{}),
+    %% Send messages
+    pc_producer_loop(Channel1, <<>>, Q, Payload, NMessages),
+    %% Close connection without closing channels
+    amqp_connection:close(Connection1),
+    %% Get sent messages back and count them
+    {ok, Connection2} = new_connection(Config),
+    {ok, Channel2} = amqp_connection:open_channel(
+                         Connection2, {amqp_direct_consumer, [self()]}),
+    amqp_channel:call(Channel2, #'basic.consume'{queue = Q, no_ack = true}),
+    receive #'basic.consume_ok'{} -> ok end,
+    true = (pc_consumer_loop(Channel2, Payload, 0) == NMessages),
+    %% Make sure queue is empty
+    #'queue.declare_ok'{queue = Q, message_count = NRemaining} =
+        amqp_channel:call(Channel2, #'queue.declare'{queue   = Q,
+                                                     passive = true}),
+    true = (NRemaining == 0),
+    amqp_channel:call(Channel2, #'queue.delete'{queue = Q}),
+    teardown(Connection2, Channel2).
+
+%% Synchronously publishes the same payload NRemaining times to
+%% exchange X under routing key Key (default exchange + queue name in
+%% pub_and_close/1).
+pc_producer_loop(_, _, _, _, 0) -> ok;
+pc_producer_loop(Channel, X, Key, Payload, NRemaining) ->
+    Publish = #'basic.publish'{exchange = X, routing_key = Key},
+    ok = amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+    pc_producer_loop(Channel, X, Key, Payload, NRemaining - 1).
+
+%% Counts deliveries whose payload matches exactly; any other payload is
+%% a test failure. A 1 s idle period ends the count and returns it.
+pc_consumer_loop(Channel, Payload, NReceived) ->
+    receive
+        {#'basic.deliver'{},
+         #amqp_msg{payload = DeliveredPayload}} ->
+            case DeliveredPayload of
+                Payload ->
+                    pc_consumer_loop(Channel, Payload, NReceived + 1);
+                _ ->
+                    exit(received_unexpected_content)
+            end
+    after 1000 ->
+        NReceived
+    end.
+
+%% -------------------------------------------------------------------
+
+%% Only opens and closes a connection; presumably the tune parameters
+%% under test (channel_max etc.) are set in Config by the suite's init —
+%% confirm against the group setup, which is outside this window.
+channel_tune_negotiation(Config) ->
+    {ok, Connection} = new_connection(Config),
+    amqp_connection:close(Connection),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Publisher confirms: after confirm.select and one publish (via
+%% setup_publish), a basic.ack must arrive at the registered confirm
+%% handler within 2 s; a nack or a timeout fails the test.
+confirm(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+    amqp_channel:register_confirm_handler(Channel, self()),
+    {ok, Q} = setup_publish(Channel),
+    {#'basic.get_ok'{}, _}
+        = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
+    ok = receive
+             #'basic.ack'{}  -> ok;
+             #'basic.nack'{} -> fail
+         after 2000 ->
+                 exit(did_not_receive_pub_ack)
+         end,
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% wait_for_confirms/1 must return true after 1000 publishes in confirm
+%% mode (volume chosen to provoke a multi-ack from the broker).
+confirm_barrier(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+    [amqp_channel:call(
+        Channel,
+        #'basic.publish'{routing_key = <<"whoosh-confirm_barrier">>},
+        #amqp_msg{payload = <<"foo">>})
+     || _ <- lists:seq(1, 1000)], %% Hopefully enough to get a multi-ack
+    true = amqp_channel:wait_for_confirms(Channel),
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% Waiting for confirms without having issued confirm.select must throw
+%% not_in_confirm_mode rather than succeed.
+confirm_select_before_wait(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    try amqp_channel:wait_for_confirms(Channel) of
+        _ -> exit(success_despite_lack_of_confirm_mode)
+    catch
+        %% bare pattern = throw class
+        not_in_confirm_mode -> ok
+    end,
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% With a 0 timeout the wait may race with the broker's acks, so both
+%% true and timeout are accepted; a subsequent unbounded wait must then
+%% return true.
+confirm_barrier_timeout(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+    [amqp_channel:call(
+        Channel,
+        #'basic.publish'{routing_key = <<"whoosh-confirm_barrier_timeout">>},
+        #amqp_msg{payload = <<"foo">>})
+     || _ <- lists:seq(1, 1000)],
+    case amqp_channel:wait_for_confirms(Channel, 0) of
+        true    -> ok;
+        timeout -> ok
+    end,
+    true = amqp_channel:wait_for_confirms(Channel),
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% wait_for_confirms_or_die/2 with a 0 timeout: either all confirms
+%% already arrived (true) or it exits with 'timeout' — in which case the
+%% channel is killed, so only the connection is torn down afterwards.
+confirm_barrier_die_timeout(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
+    [amqp_channel:call(
+        Channel,
+        #'basic.publish'{routing_key = <<"whoosh-confirm_barrier_die_timeout">>},
+        #amqp_msg{payload = <<"foo">>})
+     || _ <- lists:seq(1, 1000)],
+    try amqp_channel:wait_for_confirms_or_die(Channel, 0) of
+        true -> ok
+    catch
+        exit:timeout -> ok
+    end,
+    amqp_connection:close(Connection),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% When the process subscribed via amqp_channel:subscribe/3 dies, the
+%% registered default consumer (this test process) must receive the
+%% deliveries instead.
+default_consumer(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    amqp_selective_consumer:register_default_consumer(Channel, self()),
+
+    #'queue.declare_ok'{queue = Q}
+        = amqp_channel:call(Channel, #'queue.declare'{}),
+    %% A throwaway subscriber that just idles until killed below.
+    Pid = spawn(fun () -> receive
+                          after 10000 -> ok
+                          end
+                end),
+    #'basic.consume_ok'{} =
+        amqp_channel:subscribe(Channel, #'basic.consume'{queue = Q}, Pid),
+    erlang:monitor(process, Pid),
+    exit(Pid, shutdown),
+    receive
+        {'DOWN', _, process, _, _} ->
+            io:format("little consumer died out~n")
+    end,
+    Payload = <<"for the default consumer">>,
+    amqp_channel:call(Channel,
+                      #'basic.publish'{exchange = <<>>, routing_key = Q},
+                      #amqp_msg{payload = Payload}),
+
+    receive
+        {#'basic.deliver'{}, #'amqp_msg'{payload = Payload}} ->
+            ok
+    after 1000 ->
+            exit('default_consumer_didnt_work')
+    end,
+    teardown(Connection, Channel).
+
+%% -------------------------------------------------------------------
+
+%% consume/cancel/consume with nowait = true: no consume_ok may ever be
+%% delivered (that would be a protocol violation with nowait), but the
+%% pre-published message must still be delivered to the default consumer
+%% and gets acked.
+subscribe_nowait(Config) ->
+    {ok, Conn} = new_connection(Config),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    {ok, Q} = setup_publish(Ch),
+    CTag = <<"ctag">>,
+    amqp_selective_consumer:register_default_consumer(Ch, self()),
+    ok = amqp_channel:call(Ch, #'basic.consume'{queue        = Q,
+                                                consumer_tag = CTag,
+                                                nowait       = true}),
+    ok = amqp_channel:call(Ch, #'basic.cancel' {consumer_tag = CTag,
+                                                nowait       = true}),
+    ok = amqp_channel:call(Ch, #'basic.consume'{queue        = Q,
+                                                consumer_tag = CTag,
+                                                nowait       = true}),
+    receive
+        #'basic.consume_ok'{} ->
+            exit(unexpected_consume_ok);
+        {#'basic.deliver'{delivery_tag = DTag}, _Content} ->
+            amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag})
+    end,
+    teardown(Conn, Ch).
+
+%% -------------------------------------------------------------------
+
+%% connection.blocked, connection.unblocked
+
+%% connection.blocked, connection.unblocked
+%% Raises a memory resource alarm on the broker and checks that the
+%% registered blocked handler sees connection.blocked, then (after the
+%% alarm is cleared by the handler) connection.unblocked. Alarms are
+%% cleared again in both success and failure paths to avoid leaking
+%% broker state into later tests.
+connection_blocked_network(Config) ->
+    {ok, Connection} = new_connection(Config),
+    X = <<"amq.direct">>,
+    K = Payload = <<"x">>,
+    clear_resource_alarm(memory, Config),
+    timer:sleep(1000),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    Parent = self(),
+    Child = spawn_link(
+              fun() ->
+                      receive
+                          #'connection.blocked'{} -> ok
+                      end,
+                      clear_resource_alarm(memory, Config),
+                      receive
+                          #'connection.unblocked'{} -> ok
+                      end,
+                      Parent ! ok
+              end),
+    amqp_connection:register_blocked_handler(Connection, Child),
+    set_resource_alarm(memory, Config),
+    %% Publishing while blocked triggers the blocked notification.
+    Publish = #'basic.publish'{exchange = X,
+                               routing_key = K},
+    amqp_channel:call(Channel, Publish,
+                      #amqp_msg{payload = Payload}),
+    timer:sleep(1000),
+    receive
+        ok ->
+            clear_resource_alarm(memory, Config),
+            clear_resource_alarm(disk, Config),
+            ok
+    after 10000 ->
+        clear_resource_alarm(memory, Config),
+        clear_resource_alarm(disk, Config),
+        exit(did_not_receive_connection_blocked)
+    end,
+    amqp_connection:close(Connection),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+%% Negative test cases.
+%% -------------------------------------------------------------------
+
+%% Publishing to a non-existent exchange (exchange/routing-key swapped
+%% on purpose) must kill only the offending channel; the connection and
+%% a sibling channel must keep working.
+non_existent_exchange(Config) ->
+    {ok, Connection} = new_connection(Config),
+    X = <<"test-non_existent_exchange">>,
+    RoutingKey = <<"a-non_existent_exchange">>,
+    Payload = <<"foobar">>,
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    {ok, OtherChannel} = amqp_connection:open_channel(Connection),
+    amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
+
+    %% Deliberately mix up the routingkey and exchange arguments
+    Publish = #'basic.publish'{exchange = RoutingKey, routing_key = X},
+    amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
+    wait_for_death(Channel),
+
+    %% Make sure Connection and OtherChannel still serve us and are not dead
+    {ok, _} = amqp_connection:open_channel(Connection),
+    amqp_channel:call(OtherChannel, #'exchange.delete'{exchange = X}),
+    amqp_connection:close(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Binding a non-existent queue must fail with a server-initiated
+%% channel close carrying ?NOT_FOUND; the connection itself survives.
+bogus_rpc(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    %% Deliberately bind to a non-existent queue
+    Bind = #'queue.bind'{exchange    = <<"amq.topic">>,
+                         queue       = <<"does-not-exist">>,
+                         routing_key = <<>>},
+    try amqp_channel:call(Channel, Bind) of
+        _ -> exit(expected_to_exit)
+    catch
+        exit:{{shutdown, {server_initiated_close, Code, _}},_} ->
+            ?NOT_FOUND = Code
+    end,
+    wait_for_death(Channel),
+    true = is_process_alive(Connection),
+    amqp_connection:close(Connection).
+
+%% -------------------------------------------------------------------
+
+%% A connection-level (hard) error — basic.qos with an unsupported
+%% prefetch_size — must close the whole connection with
+%% ?NOT_IMPLEMENTED, taking the *other* channel down with the same
+%% connection_closing reason.
+hard_error(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    {ok, OtherChannel} = amqp_connection:open_channel(Connection),
+    OtherChannelMonitor = erlang:monitor(process, OtherChannel),
+    Qos = #'basic.qos'{prefetch_size = 10000000},
+    try amqp_channel:call(Channel, Qos) of
+        _ -> exit(expected_to_exit)
+    catch
+        exit:{{shutdown, {connection_closing,
+                          {server_initiated_close, ?NOT_IMPLEMENTED, _}}}, _} ->
+            ok
+    end,
+    receive
+        {'DOWN', OtherChannelMonitor, process, OtherChannel, OtherExit} ->
+            {shutdown,
+             {connection_closing,
+              {server_initiated_close, ?NOT_IMPLEMENTED, _}}} = OtherExit
+    end,
+    wait_for_death(Channel),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Presumably Config carries an unknown username — confirm against the
+%% suite init. The connection attempt must fail with an auth_failure.
+non_existent_user(Config) ->
+    {error, {auth_failure, _}} = new_connection(Config).
+
+%% -------------------------------------------------------------------
+
+%% Presumably Config carries a wrong password — confirm against the
+%% suite init. The connection attempt must fail with an auth_failure.
+invalid_password(Config) ->
+    {error, {auth_failure, _}} = new_connection(Config).
+
+%% -------------------------------------------------------------------
+
+%% Connecting to an unknown vhost must be refused with not_allowed.
+non_existent_vhost(Config) ->
+    {error, not_allowed} = new_connection(Config).
+
+%% -------------------------------------------------------------------
+
+%% A user lacking vhost permissions must be refused with not_allowed.
+no_permission(Config) ->
+    {error, not_allowed} = new_connection(Config).
+
+%% -------------------------------------------------------------------
+
+%% An error in a channel should result in the death of the entire connection.
+%% The death of the channel is caused by an error in generating the frames
+%% (writer dies)
+%% An error in a channel should result in the death of the entire connection.
+%% The death of the channel is caused by an error in generating the frames
+%% (writer dies): props is an invalid <<>> instead of a #'P_basic'{}
+%% record. The expected exit reason differs between direct and network
+%% connections, hence the two catch clauses guarded on ConnType.
+channel_writer_death(Config) ->
+    ConnType = ?config(amqp_client_conn_type, Config),
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    Publish = #'basic.publish'{routing_key = <<>>, exchange = <<>>},
+    QoS = #'basic.qos'{prefetch_count = 0},
+    Message = #amqp_msg{props = <<>>, payload = <<>>},
+    amqp_channel:cast(Channel, Publish, Message),
+    %% The follow-up sync call surfaces the failure caused by the cast.
+    try
+        Ret = amqp_channel:call(Channel, QoS),
+        throw({unexpected_success, Ret})
+    catch
+        exit:{{function_clause,
+               [{rabbit_channel, check_user_id_header, _, _} | _]}, _}
+        when ConnType =:= direct -> ok;
+
+        exit:{{infrastructure_died, {unknown_properties_record, <<>>}}, _}
+        when ConnType =:= network -> ok
+    end,
+    wait_for_death(Channel),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% The connection should die if the underlying connection is prematurely
+%% closed. For a network connection, this means that the TCP socket is
+%% closed. For a direct connection (remotely only, of course), this means that
+%% the RabbitMQ node appears as down.
+%% The connection should die if the underlying connection is prematurely
+%% closed. For a network connection, this means that the TCP socket is
+%% closed. For a direct connection (remotely only, of course), this means that
+%% the RabbitMQ node appears as down.
+connection_failure(Config) ->
+    {ok, Connection} = new_connection(Config),
+    case amqp_connection:info(Connection, [type, amqp_params]) of
+        [{type, direct}, {amqp_params, Params}]  ->
+            case Params#amqp_params_direct.node of
+                %% Local direct connection: cannot disconnect from
+                %% ourselves, so just close normally.
+                N when N == node() ->
+                    amqp_connection:close(Connection);
+                N ->
+                    true = erlang:disconnect_node(N),
+                    %% re-establish the distribution link for later tests
+                    net_adm:ping(N)
+            end;
+        [{type, network}, {amqp_params, _}] ->
+            [{sock, Sock}] = amqp_connection:info(Connection, [sock]),
+            ok = gen_tcp:close(Sock)
+    end,
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% An error in the channel process should result in the death of the entire
+%% connection. The death of the channel is caused by making a call with an
+%% invalid message to the channel process
+channel_death(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    try
+        %% bogus_message is not an AMQP method record, so the channel's
+        %% is_connection_method/1 dispatch fails with badarg.
+        Ret = amqp_channel:call(Channel, bogus_message),
+        throw({unexpected_success, Ret})
+    catch
+        exit:{{badarg,
+               [{amqp_channel, is_connection_method, 1, _} | _]}, _} -> ok
+    end,
+    wait_for_death(Channel),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Attempting to send a shortstr longer than 255 bytes in a property field
+%% should fail - this only applies to the network case
+shortstr_overflow_property(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    %% 340 bytes - well over the 255-byte limit of an AMQP shortstr.
+    SentString = << <<"k">> || _ <- lists:seq(1, 340)>>,
+    #'queue.declare_ok'{queue = Q}
+        = amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
+    Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
+    PBasic = #'P_basic'{content_type = SentString},
+    AmqpMsg = #amqp_msg{payload = <<"foobar">>, props = PBasic},
+    QoS = #'basic.qos'{prefetch_count = 0},
+    %% The publish is asynchronous; the synchronous QoS call below is only
+    %% there to observe the encoding failure the publish provokes.
+    amqp_channel:cast(Channel, Publish, AmqpMsg),
+    try
+        Ret = amqp_channel:call(Channel, QoS),
+        throw({unexpected_success, Ret})
+    catch
+        exit:{{infrastructure_died, content_properties_shortstr_overflow}, _} -> ok
+    end,
+    wait_for_death(Channel),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Attempting to send a shortstr longer than 255 bytes in a method's field
+%% should fail - this only applies to the network case
+shortstr_overflow_field(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    %% 340 bytes - well over the 255-byte limit of an AMQP shortstr.
+    SentString = << <<"k">> || _ <- lists:seq(1, 340)>>,
+    #'queue.declare_ok'{queue = Q}
+        = amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
+    try
+        %% consumer_tag is a shortstr field; encoding it must overflow.
+        Ret = amqp_channel:call(
+                Channel, #'basic.consume'{queue = Q,
+                                          no_ack = true,
+                                          consumer_tag = SentString}),
+        throw({unexpected_success, Ret})
+    catch
+        exit:{{infrastructure_died, method_field_shortstr_overflow}, _} -> ok
+    end,
+    wait_for_death(Channel),
+    wait_for_death(Connection).
+
+%% -------------------------------------------------------------------
+
+%% Simulates a #'connection.open'{} method received on non-zero channel. The
+%% connection is expected to send a '#connection.close{}' to the server with
+%% reply code command_invalid
+command_invalid_over_channel(Config) ->
+    {ok, Connection} = new_connection(Config),
+    {ok, Channel} = amqp_connection:open_channel(Connection),
+    MonitorRef = erlang:monitor(process, Connection),
+    %% Inject the connection-only method into the channel process; the
+    %% injection mechanism differs per connection type, the outcome must not.
+    case amqp_connection:info(Connection, [type]) of
+        [{type, direct}]  -> Channel ! {send_command, #'connection.open'{}};
+        [{type, network}] -> gen_server:cast(Channel,
+                                 {method, #'connection.open'{}, none, noflow})
+    end,
+    assert_down_with_error(MonitorRef, command_invalid),
+    false = is_process_alive(Channel).
+
+%% -------------------------------------------------------------------
+
+%% Simulates a #'basic.ack'{} method received on channel zero. The connection
+%% is expected to send a '#connection.close{}' to the server with reply code
+%% command_invalid - this only applies to the network case
+command_invalid_over_channel0(Config) ->
+    {ok, Connection} = new_connection(Config),
+    %% Inject a channel-level method straight into the connection process
+    %% (i.e. channel 0).
+    gen_server:cast(Connection, {method, #'basic.ack'{}, none, noflow}),
+    %% NOTE(review): the monitor is created after the cast; if the connection
+    %% terminated first the 'DOWN' reason would be noproc rather than the
+    %% expected shutdown tuple - appears to rely on cast being async. Confirm.
+    MonitorRef = erlang:monitor(process, Connection),
+    assert_down_with_error(MonitorRef, command_invalid).
+
+%% -------------------------------------------------------------------
+%% Helpers.
+%% -------------------------------------------------------------------
+
+%% Opens a fresh AMQP connection using the parameters stored in the CT
+%% config; returns {ok, Conn} or {error, Reason} as amqp_connection:start/1.
+new_connection(Config) ->
+    Params = ?config(amqp_client_conn_params, Config),
+    amqp_connection:start(Params).
+
+%% Declares an exclusive server-named queue and publishes <<"foobar">>
+%% (or the given Payload) to it. Returns {ok, QueueName}.
+setup_publish(Channel) ->
+    setup_publish(Channel, <<"foobar">>).
+
+setup_publish(Channel, Payload) ->
+    #'queue.declare_ok'{queue = Q} =
+        amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
+    %% Publishing via the default exchange with the queue name as routing
+    %% key routes the message directly to Q.
+    ok = amqp_channel:call(Channel, #'basic.publish'{exchange    = <<>>,
+                                                     routing_key = Q},
+                           #amqp_msg{payload = Payload}),
+    {ok, Q}.
+
+%% Closes Channel then Connection, waiting for each process to actually
+%% terminate so no processes leak between test cases.
+teardown(Connection, Channel) ->
+    amqp_channel:close(Channel),
+    wait_for_death(Channel),
+    amqp_connection:close(Connection),
+    wait_for_death(Connection).
+
+%% Blocks until Pid terminates (any exit reason). Exits the caller with
+%% timed_out_waiting_for_process_death if Pid is still alive after
+%% ?PROCESS_EXIT_TIMEOUT milliseconds.
+wait_for_death(Pid) ->
+    Ref = erlang:monitor(process, Pid),
+    receive
+        {'DOWN', Ref, process, Pid, _Reason} ->
+            ok
+    after ?PROCESS_EXIT_TIMEOUT ->
+            exit({timed_out_waiting_for_process_death, Pid})
+    end.
+
+%% Countdown-latch join point for tests that spawn parallel workers: waits
+%% for Latch (default ?LATCH) completion messages. Bare 'finished' messages
+%% only decrement the latch; {finished, Ret} also collects Ret. Returns the
+%% collected values in reverse order of arrival.
+latch_loop() ->
+    latch_loop(?LATCH, []).
+
+latch_loop(Latch) ->
+    latch_loop(Latch, []).
+
+latch_loop(0, Acc) ->
+    Acc;
+latch_loop(Latch, Acc) ->
+    receive
+        finished        -> latch_loop(Latch - 1, Acc);
+        {finished, Ret} -> latch_loop(Latch - 1, [Ret | Acc])
+    %% Overall deadline rather than per-message: ?LATCH * ?WAIT ms total.
+    after ?LATCH * ?WAIT -> exit(waited_too_long)
+    end.
+
+%% Asserts queue Q is empty: basic.get must return #'basic.get_empty'{}
+%% (badmatch otherwise).
+get_and_assert_empty(Channel, Q) ->
+    #'basic.get_empty'{}
+        = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = true}).
+
+%% Fetches one message from Q and asserts its payload equals Payload.
+%% Returns the #'basic.get_ok'{} so callers can reuse the delivery tag
+%% (e.g. to ack when NoAck is false).
+get_and_assert_equals(Channel, Q, Payload) ->
+    get_and_assert_equals(Channel, Q, Payload, true).
+
+get_and_assert_equals(Channel, Q, Payload, NoAck) ->
+    {GetOk = #'basic.get_ok'{}, Content}
+        = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = NoAck}),
+    #amqp_msg{payload = Payload2} = Content,
+    %% Pattern-match assertion: fails with badmatch if the payloads differ.
+    Payload = Payload2,
+    GetOk.
+
+%% Waits up to 2s for the monitored process to terminate with reason
+%% {shutdown, {server_misbehaved, Code, _}} and asserts that Code maps to
+%% CodeAtom in the protocol's exception table; exits with did_not_die on
+%% timeout.
+assert_down_with_error(MonitorRef, CodeAtom) ->
+    receive
+        {'DOWN', MonitorRef, process, _, Reason} ->
+            {shutdown, {server_misbehaved, Code, _}} = Reason,
+            CodeAtom = ?PROTOCOL:amqp_exception(Code)
+    after 2000 ->
+        exit(did_not_die)
+    end.
+
+%% Raises a resource alarm (memory | disk) on broker node 0 by invoking the
+%% "set-resource-alarm" make target of the amqp_client source tree with the
+%% node name and alarm source passed through the environment.
+set_resource_alarm(Source, Config)
+  when Source =:= memory; Source =:= disk ->
+    run_resource_alarm_target(Config, "set-resource-alarm", Source).
+
+%% Clears a previously raised resource alarm (memory | disk) on broker
+%% node 0 via the "clear-resource-alarm" make target.
+clear_resource_alarm(Source, Config)
+  when Source =:= memory; Source =:= disk ->
+    run_resource_alarm_target(Config, "clear-resource-alarm", Source).
+
+%% Shared driver for set_resource_alarm/2 and clear_resource_alarm/2: the
+%% same three-line body was previously duplicated verbatim in four clauses
+%% (with inconsistent "{ok, _}=" spacing in two of them). Crashes with
+%% badmatch unless make returns {ok, _}.
+run_resource_alarm_target(Config, Target, Source) ->
+    SrcDir = ?config(amqp_client_srcdir, Config),
+    Nodename = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    {ok, _} = rabbit_ct_helpers:make(Config, SrcDir, [
+        {"RABBITMQ_NODENAME=~s", [Nodename]},
+        Target, "SOURCE=" ++ atom_to_list(Source)]).
+
+fmt(Fmt, Args) -> list_to_binary(rabbit_misc:format(Fmt, Args)).
diff --git a/rabbitmq-server/deps/amqp_client/test/test_util.erl b/rabbitmq-server/deps/amqp_client/test/test_util.erl
deleted file mode 100644 (file)
index 949d35c..0000000
+++ /dev/null
@@ -1,1219 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(test_util).
-
--include_lib("eunit/include/eunit.hrl").
--include("amqp_client_internal.hrl").
-
--compile([export_all]).
-
--define(TEST_REPEATS, 100).
-
-%% The latch constant defines how many processes are spawned in order
-%% to run certain functionality in parallel. It follows the standard
-%% countdown latch pattern.
--define(Latch, 100).
-
-%% The wait constant defines how long a consumer waits before it
-%% unsubscribes
--define(Wait, 200).
-
-%% How long to wait for a process to die after an expected failure
--define(DeathWait, 5000).
-
-%% AMQP URI parsing test
-amqp_uri_parse_test() ->
-    %% From the spec (adapted)
-    ?assertMatch({ok, #amqp_params_network{username     = <<"user">>,
-                                           password     = <<"pass">>,
-                                           host         = "host",
-                                           port         = 10000,
-                                           virtual_host = <<"vhost">>,
-                                           heartbeat    = 5}},
-                 amqp_uri:parse(
-                   "amqp://user:pass@host:10000/vhost?heartbeat=5")),
-    ?assertMatch({ok, #amqp_params_network{username     = <<"usera">>,
-                                           password     = <<"apass">>,
-                                           host         = "hoast",
-                                           port         = 10000,
-                                           virtual_host = <<"v/host">>}},
-                 amqp_uri:parse(
-                   "aMQp://user%61:%61pass@ho%61st:10000/v%2fhost")),
-    ?assertMatch({ok, #amqp_params_direct{}}, amqp_uri:parse("amqp://")),
-    ?assertMatch({ok, #amqp_params_direct{username     = <<"">>,
-                                          virtual_host = <<"">>}},
-                 amqp_uri:parse("amqp://:@/")),
-    ?assertMatch({ok, #amqp_params_network{username     = <<"">>,
-                                           password     = <<"">>,
-                                           virtual_host = <<"">>,
-                                           host         = "host"}},
-                 amqp_uri:parse("amqp://:@host/")),
-    ?assertMatch({ok, #amqp_params_direct{username = <<"user">>}},
-                 amqp_uri:parse("amqp://user@")),
-    ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
-                                           password = <<"pass">>,
-                                           host     = "localhost"}},
-                 amqp_uri:parse("amqp://user:pass@localhost")),
-    ?assertMatch({ok, #amqp_params_network{host         = "host",
-                                           virtual_host = <<"/">>}},
-                 amqp_uri:parse("amqp://host")),
-    ?assertMatch({ok, #amqp_params_network{port = 10000,
-                                           host = "localhost"}},
-                 amqp_uri:parse("amqp://localhost:10000")),
-    ?assertMatch({ok, #amqp_params_direct{virtual_host = <<"vhost">>}},
-                 amqp_uri:parse("amqp:///vhost")),
-    ?assertMatch({ok, #amqp_params_network{host         = "host",
-                                           virtual_host = <<"">>}},
-                 amqp_uri:parse("amqp://host/")),
-    ?assertMatch({ok, #amqp_params_network{host         = "host",
-                                           virtual_host = <<"/">>}},
-                 amqp_uri:parse("amqp://host/%2f")),
-    ?assertMatch({ok, #amqp_params_network{host = "::1"}},
-                 amqp_uri:parse("amqp://[::1]")),
-
-    %% Various other cases
-    ?assertMatch({ok, #amqp_params_network{host = "host", port = 100}},
-                 amqp_uri:parse("amqp://host:100")),
-    ?assertMatch({ok, #amqp_params_network{host = "::1", port = 100}},
-                 amqp_uri:parse("amqp://[::1]:100")),
-
-    ?assertMatch({ok, #amqp_params_network{host         = "host",
-                                           virtual_host = <<"blah">>}},
-                 amqp_uri:parse("amqp://host/blah")),
-    ?assertMatch({ok, #amqp_params_network{host         = "host",
-                                           port         = 100,
-                                           virtual_host = <<"blah">>}},
-                 amqp_uri:parse("amqp://host:100/blah")),
-    ?assertMatch({ok, #amqp_params_network{host         = "::1",
-                                           virtual_host = <<"blah">>}},
-                 amqp_uri:parse("amqp://[::1]/blah")),
-    ?assertMatch({ok, #amqp_params_network{host         = "::1",
-                                           port         = 100,
-                                           virtual_host = <<"blah">>}},
-                 amqp_uri:parse("amqp://[::1]:100/blah")),
-
-    ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
-                                           password = <<"pass">>,
-                                           host     = "host"}},
-                 amqp_uri:parse("amqp://user:pass@host")),
-    ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
-                                           password = <<"pass">>,
-                                           port     = 100}},
-                 amqp_uri:parse("amqp://user:pass@host:100")),
-    ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
-                                           password = <<"pass">>,
-                                           host     = "::1"}},
-                 amqp_uri:parse("amqp://user:pass@[::1]")),
-    ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
-                                           password = <<"pass">>,
-                                           host     = "::1",
-                                           port     = 100}},
-                 amqp_uri:parse("amqp://user:pass@[::1]:100")),
-
-    %% TLS options
-    {ok, #amqp_params_network{ssl_options = TLSOpts1}} =
-        amqp_uri:parse("amqps://host/%2f?cacertfile=/path/to/cacertfile.pem"),
-    ?assertEqual(lists:usort([{cacertfile,"/path/to/cacertfile.pem"}]),
-                 lists:usort(TLSOpts1)),
-
-    {ok, #amqp_params_network{ssl_options = TLSOpts2}} =
-        amqp_uri:parse("amqps://host/%2f?cacertfile=/path/to/cacertfile.pem"
-                       "&certfile=/path/to/certfile.pem"),
-    ?assertEqual(lists:usort([{certfile,  "/path/to/certfile.pem"},
-                              {cacertfile,"/path/to/cacertfile.pem"}]),
-                 lists:usort(TLSOpts2)),
-
-    {ok, #amqp_params_network{ssl_options = TLSOpts3}} =
-        amqp_uri:parse("amqps://host/%2f?verify=verify_peer"
-                       "&fail_if_no_peer_cert=true"),
-    ?assertEqual(lists:usort([{fail_if_no_peer_cert, true},
-                              {verify,               verify_peer}
-                             ]), lists:usort(TLSOpts3)),
-
-    {ok, #amqp_params_network{ssl_options = TLSOpts4}} =
-        amqp_uri:parse("amqps://host/%2f?cacertfile=/path/to/cacertfile.pem"
-                       "&certfile=/path/to/certfile.pem"
-                       "&password=topsecret"
-                       "&depth=5"),
-    ?assertEqual(lists:usort([{certfile,  "/path/to/certfile.pem"},
-                              {cacertfile,"/path/to/cacertfile.pem"},
-                              {password,  "topsecret"},
-                              {depth,     5}]),
-                 lists:usort(TLSOpts4)),
-
-
-    %% Various failure cases
-    ?assertMatch({error, _}, amqp_uri:parse("http://www.rabbitmq.com")),
-    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:bar:baz")),
-    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo[::1]")),
-    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:[::1]")),
-    ?assertMatch({error, _}, amqp_uri:parse("amqp://[::1]foo")),
-    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:1000xyz")),
-    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:1000000")),
-    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo/bar/baz")),
-
-    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%1")),
-    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%1x")),
-    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%xy")),
-
-    ok.
-
-%%--------------------------------------------------------------------
-%% Destination Parsing Tests
-%%--------------------------------------------------------------------
-
-route_destination_test() ->
-    %% valid queue
-    ?assertMatch({ok, {queue, "test"}}, parse_dest("/queue/test")),
-
-    %% valid topic
-    ?assertMatch({ok, {topic, "test"}}, parse_dest("/topic/test")),
-
-    %% valid exchange
-    ?assertMatch({ok, {exchange, {"test", undefined}}}, parse_dest("/exchange/test")),
-
-    %% valid temp queue
-    ?assertMatch({ok, {temp_queue, "test"}}, parse_dest("/temp-queue/test")),
-
-    %% valid reply queue
-    ?assertMatch({ok, {reply_queue, "test"}}, parse_dest("/reply-queue/test")),
-    ?assertMatch({ok, {reply_queue, "test/2"}}, parse_dest("/reply-queue/test/2")),
-
-    %% valid exchange with pattern
-    ?assertMatch({ok, {exchange, {"test", "pattern"}}},
-        parse_dest("/exchange/test/pattern")),
-
-    %% valid pre-declared queue
-    ?assertMatch({ok, {amqqueue, "test"}}, parse_dest("/amq/queue/test")),
-
-    %% queue without name
-    ?assertMatch({error, {invalid_destination, queue, ""}}, parse_dest("/queue")),
-    ?assertMatch({ok, {queue, undefined}}, parse_dest("/queue", true)),
-
-    %% topic without name
-    ?assertMatch({error, {invalid_destination, topic, ""}}, parse_dest("/topic")),
-
-    %% exchange without name
-    ?assertMatch({error, {invalid_destination, exchange, ""}},
-        parse_dest("/exchange")),
-
-    %% exchange default name
-    ?assertMatch({error, {invalid_destination, exchange, "//foo"}},
-        parse_dest("/exchange//foo")),
-
-    %% amqqueue without name
-    ?assertMatch({error, {invalid_destination, amqqueue, ""}},
-        parse_dest("/amq/queue")),
-
-    %% queue without name with trailing slash
-    ?assertMatch({error, {invalid_destination, queue, "/"}}, parse_dest("/queue/")),
-
-    %% topic without name with trailing slash
-    ?assertMatch({error, {invalid_destination, topic, "/"}}, parse_dest("/topic/")),
-
-    %% exchange without name with trailing slash
-    ?assertMatch({error, {invalid_destination, exchange, "/"}},
-        parse_dest("/exchange/")),
-
-    %% queue with invalid name
-    ?assertMatch({error, {invalid_destination, queue, "/foo/bar"}},
-        parse_dest("/queue/foo/bar")),
-
-    %% topic with invalid name
-    ?assertMatch({error, {invalid_destination, topic, "/foo/bar"}},
-        parse_dest("/topic/foo/bar")),
-
-    %% exchange with invalid name
-    ?assertMatch({error, {invalid_destination, exchange, "/foo/bar/baz"}},
-        parse_dest("/exchange/foo/bar/baz")),
-
-    %% unknown destination
-    ?assertMatch({error, {unknown_destination, "/blah/boo"}},
-        parse_dest("/blah/boo")),
-
-    %% queue with escaped name
-    ?assertMatch({ok, {queue, "te/st"}}, parse_dest("/queue/te%2Fst")),
-
-    %% valid exchange with escaped name and pattern
-    ?assertMatch({ok, {exchange, {"te/st", "pa/tt/ern"}}},
-        parse_dest("/exchange/te%2Fst/pa%2Ftt%2Fern")),
-
-    ok.
-
-parse_dest(Destination, Params) ->
-    rabbit_routing_util:parse_endpoint(Destination, Params).
-parse_dest(Destination) ->
-    rabbit_routing_util:parse_endpoint(Destination).
-
-%%%%
-%%
-%% This is an example of how the client interaction should work
-%%
-%%   {ok, Connection} = amqp_connection:start(network),
-%%   {ok, Channel} = amqp_connection:open_channel(Connection),
-%%   %%...do something useful
-%%   amqp_channel:close(Channel),
-%%   amqp_connection:close(Connection).
-%%
-
-lifecycle_test() ->
-    {ok, Connection} = new_connection(),
-    X = <<"x">>,
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    amqp_channel:call(Channel,
-                      #'exchange.declare'{exchange = X,
-                                          type = <<"topic">>}),
-    Parent = self(),
-    [spawn(fun () -> queue_exchange_binding(Channel, X, Parent, Tag) end)
-     || Tag <- lists:seq(1, ?Latch)],
-    latch_loop(),
-    amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
-    teardown(Connection, Channel),
-    ok.
-
-direct_no_user_test() ->
-    {ok, Connection} = new_connection(just_direct, [{username, none},
-                                                    {password, none}]),
-    amqp_connection:close(Connection),
-    wait_for_death(Connection).
-
-direct_no_password_test() ->
-    {ok, Connection} = new_connection(just_direct, [{username, <<"guest">>},
-                                                    {password, none}]),
-    amqp_connection:close(Connection),
-    wait_for_death(Connection).
-
-direct_no_vhost_test() ->
-    {error, not_allowed} = new_connection(
-                             just_direct, [{username, <<"guest">>},
-                                           {virtual_host, <<"/noexist">>}]),
-    ok.
-
-network_no_vhost_test() ->
-    {error, not_allowed} =
-        new_connection(just_network, [{username, <<"guest">>},
-                                      {virtual_host, <<"/noexist">>}]),
-    ok.
-
-queue_exchange_binding(Channel, X, Parent, Tag) ->
-    receive
-        nothing -> ok
-    after (?Latch - Tag rem 7) * 10 ->
-        ok
-    end,
-    Q = <<"a.b.c", Tag:32>>,
-    Binding = <<"a.b.c.*">>,
-    #'queue.declare_ok'{queue = Q1}
-        = amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
-    ?assertMatch(Q, Q1),
-    Route = #'queue.bind'{queue = Q,
-                          exchange = X,
-                          routing_key = Binding},
-    amqp_channel:call(Channel, Route),
-    amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
-    Parent ! finished.
-
-nowait_exchange_declare_test() ->
-    {ok, Connection} = new_connection(),
-    X = <<"x">>,
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    ?assertEqual(
-      ok,
-      amqp_channel:call(Channel, #'exchange.declare'{exchange = X,
-                                                     type = <<"topic">>,
-                                                     nowait = true})),
-    teardown(Connection, Channel).
-
-channel_lifecycle_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    amqp_channel:close(Channel),
-    {ok, Channel2} = amqp_connection:open_channel(Connection),
-    teardown(Connection, Channel2),
-    ok.
-
-abstract_method_serialization_test(BeforeFun, MultiOpFun, AfterFun) ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    X = <<"test">>,
-    Payload = list_to_binary(["x" || _ <- lists:seq(1, 1000)]),
-    OpsPerProcess = 20,
-    #'exchange.declare_ok'{} =
-        amqp_channel:call(Channel, #'exchange.declare'{exchange = X,
-                                                       type = <<"topic">>}),
-    BeforeRet = BeforeFun(Channel, X),
-    Parent = self(),
-    [spawn(fun () -> Ret = [MultiOpFun(Channel, X, Payload, BeforeRet, I)
-                            || _ <- lists:seq(1, OpsPerProcess)],
-                   Parent ! {finished, Ret}
-           end) || I <- lists:seq(1, ?Latch)],
-    MultiOpRet = latch_loop(),
-    AfterFun(Channel, X, Payload, BeforeRet, MultiOpRet),
-    amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
-    teardown(Connection, Channel).
-
-%% This is designed to exercise the internal queuing mechanism
-%% to ensure that sync methods are properly serialized
-sync_method_serialization_test() ->
-    abstract_method_serialization_test(
-        fun (_, _) -> ok end,
-        fun (Channel, _, _, _, Count) ->
-                Q = fmt("test-~p", [Count]),
-                #'queue.declare_ok'{queue = Q1} =
-                    amqp_channel:call(Channel,
-                                      #'queue.declare'{queue     = Q,
-                                                       exclusive = true}),
-                ?assertMatch(Q, Q1)
-        end,
-        fun (_, _, _, _, _) -> ok end).
-
-%% This is designed to exercise the internal queuing mechanism
-%% to ensure that sending async methods and then a sync method is serialized
-%% properly
-async_sync_method_serialization_test() ->
-    abstract_method_serialization_test(
-        fun (Channel, _X) ->
-                #'queue.declare_ok'{queue = Q} =
-                    amqp_channel:call(Channel, #'queue.declare'{}),
-                Q
-        end,
-        fun (Channel, X, Payload, _, _) ->
-                %% The async methods
-                ok = amqp_channel:call(Channel,
-                                       #'basic.publish'{exchange = X,
-                                                        routing_key = <<"a">>},
-                                       #amqp_msg{payload = Payload})
-        end,
-        fun (Channel, X, _, Q, _) ->
-                %% The sync method
-                #'queue.bind_ok'{} =
-                    amqp_channel:call(Channel,
-                                      #'queue.bind'{exchange = X,
-                                                    queue = Q,
-                                                    routing_key = <<"a">>}),
-                %% No message should have been routed
-                #'queue.declare_ok'{message_count = 0} =
-                    amqp_channel:call(Channel,
-                                      #'queue.declare'{queue = Q,
-                                                       passive = true})
-        end).
-
-%% This is designed to exercise the internal queuing mechanism
-%% to ensure that sending sync methods and then an async method is serialized
-%% properly
-sync_async_method_serialization_test() ->
-    abstract_method_serialization_test(
-        fun (_, _) -> ok end,
-        fun (Channel, X, _Payload, _, _) ->
-                %% The sync methods (called with cast to resume immediately;
-                %% the order should still be preserved)
-                #'queue.declare_ok'{queue = Q} =
-                    amqp_channel:call(Channel,
-                                      #'queue.declare'{exclusive = true}),
-                amqp_channel:cast(Channel, #'queue.bind'{exchange = X,
-                                                         queue = Q,
-                                                         routing_key= <<"a">>}),
-                Q
-        end,
-        fun (Channel, X, Payload, _, MultiOpRet) ->
-                #'confirm.select_ok'{} = amqp_channel:call(
-                                           Channel, #'confirm.select'{}),
-                ok = amqp_channel:call(Channel,
-                                       #'basic.publish'{exchange = X,
-                                                        routing_key = <<"a">>},
-                                       #amqp_msg{payload = Payload}),
-                %% All queues must have gotten this message
-                true = amqp_channel:wait_for_confirms(Channel),
-                lists:foreach(
-                    fun (Q) ->
-                            #'queue.declare_ok'{message_count = 1} =
-                                amqp_channel:call(
-                                  Channel, #'queue.declare'{queue   = Q,
-                                                            passive = true})
-                    end, lists:flatten(MultiOpRet))
-        end).
-
-queue_unbind_test() ->
-    {ok, Connection} = new_connection(),
-    X = <<"eggs">>, Q = <<"foobar">>, Key = <<"quay">>,
-    Payload = <<"foobar">>,
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
-    amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
-    Bind = #'queue.bind'{queue = Q,
-                         exchange = X,
-                         routing_key = Key},
-    amqp_channel:call(Channel, Bind),
-    Publish = #'basic.publish'{exchange = X, routing_key = Key},
-    amqp_channel:call(Channel, Publish, Msg = #amqp_msg{payload = Payload}),
-    get_and_assert_equals(Channel, Q, Payload),
-    Unbind = #'queue.unbind'{queue = Q,
-                             exchange = X,
-                             routing_key = Key},
-    amqp_channel:call(Channel, Unbind),
-    amqp_channel:call(Channel, Publish, Msg),
-    get_and_assert_empty(Channel, Q),
-    teardown(Connection, Channel).
-
-get_and_assert_empty(Channel, Q) ->
-    #'basic.get_empty'{}
-        = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = true}).
-
-get_and_assert_equals(Channel, Q, Payload) ->
-    get_and_assert_equals(Channel, Q, Payload, true).
-
-get_and_assert_equals(Channel, Q, Payload, NoAck) ->
-    {GetOk = #'basic.get_ok'{}, Content}
-        = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = NoAck}),
-    #amqp_msg{payload = Payload2} = Content,
-    ?assertMatch(Payload, Payload2),
-    GetOk.
-
-basic_get_test() ->
-    basic_get_test1(new_connection()).
-
-basic_get_ipv6_test() ->
-    basic_get_test1(new_connection(just_network, [{host, "::1"}])).
-
-basic_get_test1({ok, Connection}) ->
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    {ok, Q} = setup_publish(Channel),
-    get_and_assert_equals(Channel, Q, <<"foobar">>),
-    get_and_assert_empty(Channel, Q),
-    teardown(Connection, Channel).
-
-basic_return_test() ->
-    {ok, Connection} = new_connection(),
-    X = <<"test">>,
-    Q = <<"test">>,
-    Key = <<"test">>,
-    Payload = <<"qwerty">>,
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    amqp_channel:register_return_handler(Channel, self()),
-    amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
-    amqp_channel:call(Channel, #'queue.declare'{queue = Q,
-                                                exclusive = true}),
-    Publish = #'basic.publish'{exchange = X, routing_key = Key,
-                               mandatory = true},
-    amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
-    receive
-        {BasicReturn = #'basic.return'{}, Content} ->
-            #'basic.return'{reply_code = ReplyCode,
-                            exchange = X} = BasicReturn,
-            ?assertMatch(?NO_ROUTE, ReplyCode),
-            #amqp_msg{payload = Payload2} = Content,
-            ?assertMatch(Payload, Payload2);
-        WhatsThis1 ->
-            exit({bad_message, WhatsThis1})
-    after 2000 ->
-        exit(no_return_received)
-    end,
-    amqp_channel:unregister_return_handler(Channel),
-    Publish = #'basic.publish'{exchange = X, routing_key = Key,
-                               mandatory = true},
-    amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
-    ok = receive
-             {_BasicReturn = #'basic.return'{}, _Content} ->
-                 unexpected_return;
-             WhatsThis2 ->
-                 exit({bad_message, WhatsThis2})
-         after 2000 ->
-                 ok
-         end,
-    amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
-    teardown(Connection, Channel).
-
-channel_repeat_open_close_test() ->
-    {ok, Connection} = new_connection(),
-    lists:foreach(
-        fun(_) ->
-            {ok, Ch} = amqp_connection:open_channel(Connection),
-            ok = amqp_channel:close(Ch)
-        end, lists:seq(1, 50)),
-    amqp_connection:close(Connection),
-    wait_for_death(Connection).
-
-channel_multi_open_close_test() ->
-    {ok, Connection} = new_connection(),
-    [spawn_link(
-        fun() ->
-            try amqp_connection:open_channel(Connection) of
-                {ok, Ch}           -> try amqp_channel:close(Ch) of
-                                          ok                 -> ok;
-                                          closing            -> ok
-                                      catch
-                                          exit:{noproc, _} -> ok;
-                                          exit:{normal, _} -> ok
-                                      end;
-                closing            -> ok
-            catch
-                exit:{noproc, _} -> ok;
-                exit:{normal, _} -> ok
-            end
-        end) || _ <- lists:seq(1, 50)],
-    erlang:yield(),
-    amqp_connection:close(Connection),
-    wait_for_death(Connection).
-
-basic_ack_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    {ok, Q} = setup_publish(Channel),
-    {#'basic.get_ok'{delivery_tag = Tag}, _}
-        = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
-    amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag}),
-    teardown(Connection, Channel).
-
-basic_ack_call_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    {ok, Q} = setup_publish(Channel),
-    {#'basic.get_ok'{delivery_tag = Tag}, _}
-        = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
-    amqp_channel:call(Channel, #'basic.ack'{delivery_tag = Tag}),
-    teardown(Connection, Channel).
-
-basic_consume_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    X = <<"test">>,
-    amqp_channel:call(Channel, #'exchange.declare'{exchange = X}),
-    RoutingKey = <<"key">>,
-    Parent = self(),
-    [spawn_link(fun () ->
-                        consume_loop(Channel, X, RoutingKey, Parent, <<Tag:32>>)
-                end) || Tag <- lists:seq(1, ?Latch)],
-    timer:sleep(?Latch * 20),
-    Publish = #'basic.publish'{exchange = X, routing_key = RoutingKey},
-    amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<"foobar">>}),
-    latch_loop(),
-    amqp_channel:call(Channel, #'exchange.delete'{exchange = X}),
-    teardown(Connection, Channel).
-
-consume_loop(Channel, X, RoutingKey, Parent, Tag) ->
-    #'queue.declare_ok'{queue = Q} =
-        amqp_channel:call(Channel, #'queue.declare'{}),
-    #'queue.bind_ok'{} =
-        amqp_channel:call(Channel, #'queue.bind'{queue = Q,
-                                                 exchange = X,
-                                                 routing_key = RoutingKey}),
-    #'basic.consume_ok'{} =
-        amqp_channel:call(Channel,
-                          #'basic.consume'{queue = Q, consumer_tag = Tag}),
-    receive #'basic.consume_ok'{consumer_tag = Tag} -> ok end,
-    receive {#'basic.deliver'{}, _} -> ok end,
-    #'basic.cancel_ok'{} =
-        amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = Tag}),
-    receive #'basic.cancel_ok'{consumer_tag = Tag} -> ok end,
-    Parent ! finished.
-
-consume_notification_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    #'queue.declare_ok'{queue = Q} =
-        amqp_channel:call(Channel, #'queue.declare'{}),
-    #'basic.consume_ok'{consumer_tag = CTag} = ConsumeOk =
-        amqp_channel:call(Channel, #'basic.consume'{queue = Q}),
-    receive ConsumeOk -> ok end,
-    #'queue.delete_ok'{} =
-        amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
-    receive #'basic.cancel'{consumer_tag = CTag} -> ok end,
-    amqp_channel:close(Channel),
-    ok.
-
-basic_recover_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(
-                        Connection, {amqp_direct_consumer, [self()]}),
-    #'queue.declare_ok'{queue = Q} =
-        amqp_channel:call(Channel, #'queue.declare'{}),
-    #'basic.consume_ok'{consumer_tag = Tag} =
-        amqp_channel:call(Channel, #'basic.consume'{queue = Q}),
-    receive #'basic.consume_ok'{consumer_tag = Tag} -> ok end,
-    Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
-    amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<"foobar">>}),
-    receive
-        {#'basic.deliver'{consumer_tag = Tag}, _} ->
-            %% no_ack set to false, but don't send ack
-            ok
-    end,
-    BasicRecover = #'basic.recover'{requeue = true},
-    amqp_channel:cast(Channel, BasicRecover),
-    receive
-        {#'basic.deliver'{consumer_tag = Tag,
-                          delivery_tag = DeliveryTag2}, _} ->
-            amqp_channel:cast(Channel,
-                              #'basic.ack'{delivery_tag = DeliveryTag2})
-    end,
-    teardown(Connection, Channel).
-
-simultaneous_close_test() ->
-    {ok, Connection} = new_connection(),
-    ChannelNumber = 5,
-    {ok, Channel1} = amqp_connection:open_channel(Connection, ChannelNumber),
-
-    %% Publish to non-existent exchange and immediately close channel
-    amqp_channel:cast(Channel1, #'basic.publish'{exchange = <<"does-not-exist">>,
-                                                 routing_key = <<"a">>},
-                               #amqp_msg{payload = <<"foobar">>}),
-    try amqp_channel:close(Channel1) of
-        ok      -> wait_for_death(Channel1);
-        closing -> wait_for_death(Channel1)
-    catch
-        exit:{noproc, _}                                              -> ok;
-        exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _}}, _} -> ok
-    end,
-
-    %% Channel2 (opened with the exact same number as Channel1)
-    %% should not receive a close_ok (which is intended for Channel1)
-    {ok, Channel2} = amqp_connection:open_channel(Connection, ChannelNumber),
-
-    %% Make sure Channel2 functions normally
-    #'exchange.declare_ok'{} =
-        amqp_channel:call(Channel2, #'exchange.declare'{exchange = <<"test">>}),
-    #'exchange.delete_ok'{} =
-        amqp_channel:call(Channel2, #'exchange.delete'{exchange = <<"test">>}),
-
-    teardown(Connection, Channel2).
-
-channel_tune_negotiation_test() ->
-    {ok, Connection} = new_connection([{channel_max, 10}]),
-    amqp_connection:close(Connection).
-
-basic_qos_test() ->
-    [NoQos, Qos] = [basic_qos_test(Prefetch) || Prefetch <- [0,1]],
-    ExpectedRatio = (1+1) / (1+50/5),
-    FudgeFactor = 2, %% account for timing variations
-    ?assertMatch(true, Qos / NoQos < ExpectedRatio * FudgeFactor).
-
-basic_qos_test(Prefetch) ->
-    {ok, Connection} = new_connection(),
-    Messages = 100,
-    Workers = [5, 50],
-    Parent = self(),
-    {ok, Chan} = amqp_connection:open_channel(Connection),
-    #'queue.declare_ok'{queue = Q} =
-        amqp_channel:call(Chan, #'queue.declare'{}),
-    Kids = [spawn(
-            fun() ->
-                {ok, Channel} = amqp_connection:open_channel(Connection),
-                amqp_channel:call(Channel,
-                                  #'basic.qos'{prefetch_count = Prefetch}),
-                amqp_channel:call(Channel,
-                                  #'basic.consume'{queue = Q}),
-                Parent ! finished,
-                sleeping_consumer(Channel, Sleep, Parent)
-            end) || Sleep <- Workers],
-    latch_loop(length(Kids)),
-    spawn(fun() -> {ok, Channel} = amqp_connection:open_channel(Connection),
-                   producer_loop(Channel, Q, Messages)
-          end),
-    {Res, _} = timer:tc(erlang, apply, [fun latch_loop/1, [Messages]]),
-    [Kid ! stop || Kid <- Kids],
-    latch_loop(length(Kids)),
-    teardown(Connection, Chan),
-    Res.
-
-sleeping_consumer(Channel, Sleep, Parent) ->
-    receive
-        stop ->
-            do_stop(Channel, Parent);
-        #'basic.consume_ok'{} ->
-            sleeping_consumer(Channel, Sleep, Parent);
-        #'basic.cancel_ok'{}  ->
-            exit(unexpected_cancel_ok);
-        {#'basic.deliver'{delivery_tag = DeliveryTag}, _Content} ->
-            Parent ! finished,
-            receive stop -> do_stop(Channel, Parent)
-            after Sleep -> ok
-            end,
-            amqp_channel:cast(Channel,
-                              #'basic.ack'{delivery_tag = DeliveryTag}),
-            sleeping_consumer(Channel, Sleep, Parent)
-    end.
-
-do_stop(Channel, Parent) ->
-    Parent ! finished,
-    amqp_channel:close(Channel),
-    wait_for_death(Channel),
-    exit(normal).
-
-producer_loop(Channel, _RoutingKey, 0) ->
-    amqp_channel:close(Channel),
-    wait_for_death(Channel),
-    ok;
-
-producer_loop(Channel, RoutingKey, N) ->
-    Publish = #'basic.publish'{exchange = <<>>, routing_key = RoutingKey},
-    amqp_channel:call(Channel, Publish, #amqp_msg{payload = <<>>}),
-    producer_loop(Channel, RoutingKey, N - 1).
-
-confirm_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
-    amqp_channel:register_confirm_handler(Channel, self()),
-    {ok, Q} = setup_publish(Channel),
-    {#'basic.get_ok'{}, _}
-        = amqp_channel:call(Channel, #'basic.get'{queue = Q, no_ack = false}),
-    ok = receive
-             #'basic.ack'{}  -> ok;
-             #'basic.nack'{} -> fail
-         after 2000 ->
-                 exit(did_not_receive_pub_ack)
-         end,
-    teardown(Connection, Channel).
-
-confirm_barrier_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
-    [amqp_channel:call(Channel, #'basic.publish'{routing_key = <<"whoosh">>},
-                       #amqp_msg{payload = <<"foo">>})
-     || _ <- lists:seq(1, 1000)], %% Hopefully enough to get a multi-ack
-    true = amqp_channel:wait_for_confirms(Channel),
-    teardown(Connection, Channel).
-
-confirm_select_before_wait_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    try amqp_channel:wait_for_confirms(Channel) of
-        _ -> exit(success_despite_lack_of_confirm_mode)
-    catch
-        not_in_confirm_mode -> ok
-    end,
-    teardown(Connection, Channel).
-
-confirm_barrier_timeout_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
-    [amqp_channel:call(Channel, #'basic.publish'{routing_key = <<"whoosh">>},
-                       #amqp_msg{payload = <<"foo">>})
-     || _ <- lists:seq(1, 1000)],
-    case amqp_channel:wait_for_confirms(Channel, 0) of
-        true    -> ok;
-        timeout -> ok
-    end,
-    teardown(Connection, Channel).
-
-confirm_barrier_die_timeout_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    #'confirm.select_ok'{} = amqp_channel:call(Channel, #'confirm.select'{}),
-    [amqp_channel:call(Channel, #'basic.publish'{routing_key = <<"whoosh">>},
-                       #amqp_msg{payload = <<"foo">>})
-     || _ <- lists:seq(1, 1000)],
-    try amqp_channel:wait_for_confirms_or_die(Channel, 0) of
-        true    -> ok
-    catch
-        exit:timeout -> ok
-    end,
-    amqp_connection:close(Connection),
-    wait_for_death(Connection).
-
-default_consumer_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    amqp_selective_consumer:register_default_consumer(Channel, self()),
-
-    #'queue.declare_ok'{queue = Q}
-        = amqp_channel:call(Channel, #'queue.declare'{}),
-    Pid = spawn(fun () -> receive
-                          after 10000 -> ok
-                          end
-                end),
-    #'basic.consume_ok'{} =
-        amqp_channel:subscribe(Channel, #'basic.consume'{queue = Q}, Pid),
-    erlang:monitor(process, Pid),
-    exit(Pid, shutdown),
-    receive
-        {'DOWN', _, process, _, _} ->
-            io:format("little consumer died out~n")
-    end,
-    Payload = <<"for the default consumer">>,
-    amqp_channel:call(Channel,
-                      #'basic.publish'{exchange = <<>>, routing_key = Q},
-                      #amqp_msg{payload = Payload}),
-
-    receive
-        {#'basic.deliver'{}, #'amqp_msg'{payload = Payload}} ->
-            ok
-    after 1000 ->
-            exit('default_consumer_didnt_work')
-    end,
-    teardown(Connection, Channel).
-
-subscribe_nowait_test() ->
-    {ok, Conn} = new_connection(),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    {ok, Q} = setup_publish(Ch),
-    CTag = <<"ctag">>,
-    amqp_selective_consumer:register_default_consumer(Ch, self()),
-    ok = amqp_channel:call(Ch, #'basic.consume'{queue        = Q,
-                                                consumer_tag = CTag,
-                                                nowait       = true}),
-    ok = amqp_channel:call(Ch, #'basic.cancel' {consumer_tag = CTag,
-                                                nowait       = true}),
-    ok = amqp_channel:call(Ch, #'basic.consume'{queue        = Q,
-                                                consumer_tag = CTag,
-                                                nowait       = true}),
-    receive
-        #'basic.consume_ok'{} ->
-            exit(unexpected_consume_ok);
-        {#'basic.deliver'{delivery_tag = DTag}, _Content} ->
-            amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag})
-    end,
-    teardown(Conn, Ch).
-
-basic_nack_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    #'queue.declare_ok'{queue = Q}
-        = amqp_channel:call(Channel, #'queue.declare'{}),
-
-    Payload = <<"m1">>,
-
-    amqp_channel:call(Channel,
-                      #'basic.publish'{exchange = <<>>, routing_key = Q},
-                      #amqp_msg{payload = Payload}),
-
-    #'basic.get_ok'{delivery_tag = Tag} =
-        get_and_assert_equals(Channel, Q, Payload, false),
-
-    amqp_channel:call(Channel, #'basic.nack'{delivery_tag = Tag,
-                                             multiple     = false,
-                                             requeue      = false}),
-
-    get_and_assert_empty(Channel, Q),
-    teardown(Connection, Channel).
-
-large_content_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    #'queue.declare_ok'{queue = Q}
-        = amqp_channel:call(Channel, #'queue.declare'{}),
-    random:seed(erlang:phash2([node()]),
-                time_compat:monotonic_time(),
-                time_compat:unique_integer()),
-    F = list_to_binary([random:uniform(256)-1 || _ <- lists:seq(1, 1000)]),
-    Payload = list_to_binary([[F || _ <- lists:seq(1, 1000)]]),
-    Publish = #'basic.publish'{exchange = <<>>, routing_key = Q},
-    amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
-    get_and_assert_equals(Channel, Q, Payload),
-    teardown(Connection, Channel).
-
-%% ----------------------------------------------------------------------------
-%% Test for the network client
-%% Sends a bunch of messages and immediatly closes the connection without
-%% closing the channel. Then gets the messages back from the queue and expects
-%% all of them to have been sent.
-pub_and_close_test() ->
-    {ok, Connection1} = new_connection(just_network),
-    Payload = <<"eggs">>,
-    NMessages = 50000,
-    {ok, Channel1} = amqp_connection:open_channel(Connection1),
-    #'queue.declare_ok'{queue = Q} =
-        amqp_channel:call(Channel1, #'queue.declare'{}),
-    %% Send messages
-    pc_producer_loop(Channel1, <<>>, Q, Payload, NMessages),
-    %% Close connection without closing channels
-    amqp_connection:close(Connection1),
-    %% Get sent messages back and count them
-    {ok, Connection2} = new_connection(just_network),
-    {ok, Channel2} = amqp_connection:open_channel(
-                         Connection2, {amqp_direct_consumer, [self()]}),
-    amqp_channel:call(Channel2, #'basic.consume'{queue = Q, no_ack = true}),
-    receive #'basic.consume_ok'{} -> ok end,
-    ?assert(pc_consumer_loop(Channel2, Payload, 0) == NMessages),
-    %% Make sure queue is empty
-    #'queue.declare_ok'{queue = Q, message_count = NRemaining} =
-        amqp_channel:call(Channel2, #'queue.declare'{queue   = Q,
-                                                     passive = true}),
-    ?assert(NRemaining == 0),
-    amqp_channel:call(Channel2, #'queue.delete'{queue = Q}),
-    teardown(Connection2, Channel2),
-    ok.
-
-pc_producer_loop(_, _, _, _, 0) -> ok;
-pc_producer_loop(Channel, X, Key, Payload, NRemaining) ->
-    Publish = #'basic.publish'{exchange = X, routing_key = Key},
-    ok = amqp_channel:call(Channel, Publish, #amqp_msg{payload = Payload}),
-    pc_producer_loop(Channel, X, Key, Payload, NRemaining - 1).
-
-pc_consumer_loop(Channel, Payload, NReceived) ->
-    receive
-        {#'basic.deliver'{},
-         #amqp_msg{payload = DeliveredPayload}} ->
-            case DeliveredPayload of
-                Payload ->
-                    pc_consumer_loop(Channel, Payload, NReceived + 1);
-                _ ->
-                    exit(received_unexpected_content)
-            end
-    after 1000 ->
-        NReceived
-    end.
-
-%%---------------------------------------------------------------------------
-%% This tests whether RPC over AMQP produces the same result as invoking the
-%% same argument against the same underlying gen_server instance.
-rpc_test() ->
-    {ok, Connection} = new_connection(),
-    Fun = fun(X) -> X + 1 end,
-    RPCHandler = fun(X) -> term_to_binary(Fun(binary_to_term(X))) end,
-    Q = <<"rpc-test">>,
-    Server = amqp_rpc_server:start(Connection, Q, RPCHandler),
-    Client = amqp_rpc_client:start(Connection, Q),
-    Input = 1,
-    Reply = amqp_rpc_client:call(Client, term_to_binary(Input)),
-    Expected = Fun(Input),
-    DecodedReply = binary_to_term(Reply),
-    ?assertMatch(Expected, DecodedReply),
-    amqp_rpc_client:stop(Client),
-    amqp_rpc_server:stop(Server),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
-    amqp_connection:close(Connection),
-    wait_for_death(Connection),
-    ok.
-
-%% This tests if the RPC continues to generate valid correlation ids
-%% over a series of requests.
-rpc_client_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    Q = <<"rpc-test">>,
-    Latch = 255, % enough requests to tickle bad correlation ids
-    %% Start a server to return correlation ids to the client.
-    Server = spawn_link(fun() ->
-                                rpc_correlation_server(Channel, Q)
-                        end),
-    %% Generate a series of RPC requests on the same client.
-    Client = amqp_rpc_client:start(Connection, Q),
-    Parent = self(),
-    [spawn(fun() ->
-                   Reply = amqp_rpc_client:call(Client, <<>>),
-                   Parent ! {finished, Reply}
-           end) || _ <- lists:seq(1, Latch)],
-    %% Verify that the correlation ids are valid UTF-8 strings.
-    CorrelationIds = latch_loop(Latch),
-    [?assertMatch(<<_/binary>>, DecodedId)
-     || DecodedId <- [unicode:characters_to_binary(Id, utf8)
-                      || Id <- CorrelationIds]],
-    %% Cleanup.
-    Server ! stop,
-    amqp_rpc_client:stop(Client),
-    amqp_channel:call(Channel, #'queue.delete'{queue = Q}),
-    teardown(Connection, Channel),
-    ok.
-
-%% Consumer of RPC requests that replies with the CorrelationId.
-rpc_correlation_server(Channel, Q) ->
-    amqp_channel:register_return_handler(Channel, self()),
-    amqp_channel:call(Channel, #'queue.declare'{queue = Q}),
-    amqp_channel:call(Channel, #'basic.consume'{queue = Q,
-                                                consumer_tag = <<"server">>}),
-    rpc_client_consume_loop(Channel),
-    amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = <<"server">>}),
-    amqp_channel:unregister_return_handler(Channel).
-
-rpc_client_consume_loop(Channel) ->
-    receive
-        stop ->
-            ok;
-        {#'basic.deliver'{delivery_tag = DeliveryTag},
-         #amqp_msg{props = Props}} ->
-            #'P_basic'{correlation_id = CorrelationId,
-                       reply_to = Q} = Props,
-            Properties = #'P_basic'{correlation_id = CorrelationId},
-            Publish = #'basic.publish'{exchange = <<>>,
-                                       routing_key = Q,
-                                       mandatory = true},
-            amqp_channel:call(
-              Channel, Publish, #amqp_msg{props = Properties,
-                                          payload = CorrelationId}),
-            amqp_channel:call(
-              Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
-            rpc_client_consume_loop(Channel);
-        _ ->
-            rpc_client_consume_loop(Channel)
-    after 3000 ->
-            exit(no_request_received)
-    end.
-
-%%---------------------------------------------------------------------------
-
-%% connection.blocked, connection.unblocked
-
-connection_blocked_network_test() ->
-    {ok, Connection} = new_connection(just_network),
-    X = <<"amq.direct">>,
-    K = Payload = <<"x">>,
-    clear_resource_alarm(memory),
-    timer:sleep(1000),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    Parent = self(),
-    Child = spawn_link(
-              fun() ->
-                      receive
-                          #'connection.blocked'{} -> ok
-                      end,
-                      clear_resource_alarm(memory),
-                      receive
-                          #'connection.unblocked'{} -> ok
-                      end,
-                      Parent ! ok
-              end),
-    amqp_connection:register_blocked_handler(Connection, Child),
-    set_resource_alarm(memory),
-    Publish = #'basic.publish'{exchange = X,
-                               routing_key = K},
-    amqp_channel:call(Channel, Publish,
-                      #amqp_msg{payload = Payload}),
-    timer:sleep(1000),
-    receive
-        ok ->
-            clear_resource_alarm(memory),
-            clear_resource_alarm(disk),
-            ok
-    after 10000 ->
-        clear_resource_alarm(memory),
-        clear_resource_alarm(disk),
-        exit(did_not_receive_connection_blocked)
-    end.
-
-%%---------------------------------------------------------------------------
-
-setup_publish(Channel) ->
-    #'queue.declare_ok'{queue = Q} =
-        amqp_channel:call(Channel, #'queue.declare'{exclusive = true}),
-    ok = amqp_channel:call(Channel, #'basic.publish'{exchange    = <<>>,
-                                                     routing_key = Q},
-                           #amqp_msg{payload = <<"foobar">>}),
-    {ok, Q}.
-
-teardown(Connection, Channel) ->
-    amqp_channel:close(Channel),
-    wait_for_death(Channel),
-    amqp_connection:close(Connection),
-    wait_for_death(Connection).
-
-teardown_test() ->
-    {ok, Connection} = new_connection(),
-    {ok, Channel} = amqp_connection:open_channel(Connection),
-    ?assertMatch(true, is_process_alive(Channel)),
-    ?assertMatch(true, is_process_alive(Connection)),
-    teardown(Connection, Channel),
-    ?assertMatch(false, is_process_alive(Channel)),
-    ?assertMatch(false, is_process_alive(Connection)).
-
-wait_for_death(Pid) ->
-    Ref = erlang:monitor(process, Pid),
-    receive {'DOWN', Ref, process, Pid, _Reason} -> ok
-    after ?DeathWait -> exit({timed_out_waiting_for_process_death, Pid})
-    end.
-
-latch_loop() ->
-    latch_loop(?Latch, []).
-
-latch_loop(Latch) ->
-    latch_loop(Latch, []).
-
-latch_loop(0, Acc) ->
-    Acc;
-latch_loop(Latch, Acc) ->
-    receive
-        finished        -> latch_loop(Latch - 1, Acc);
-        {finished, Ret} -> latch_loop(Latch - 1, [Ret | Acc])
-    after ?Latch * ?Wait -> exit(waited_too_long)
-    end.
-
-new_connection() ->
-    new_connection(both, []).
-
-new_connection(AllowedConnectionTypes) when is_atom(AllowedConnectionTypes) ->
-    new_connection(AllowedConnectionTypes, []);
-new_connection(Params) when is_list(Params) ->
-    new_connection(both, Params).
-
-new_connection(AllowedConnectionTypes, Params) ->
-    Params1 =
-        case {AllowedConnectionTypes,
-              os:getenv("AMQP_CLIENT_TEST_CONNECTION_TYPE")} of
-            {just_direct, "network"} ->
-                exit(normal);
-            {just_direct, "network_ssl"} ->
-                exit(normal);
-            {just_network, "direct"} ->
-                exit(normal);
-            {_, "network"} ->
-                make_network_params(Params);
-            {_, "network_ssl"} ->
-                {ok, [[CertsDir]]} = init:get_argument(erlang_client_ssl_dir),
-                make_network_params(
-                  [{ssl_options, [{cacertfile,
-                                   CertsDir ++ "/testca/cacert.pem"},
-                                  {certfile, CertsDir ++ "/client/cert.pem"},
-                                  {keyfile, CertsDir ++ "/client/key.pem"},
-                                  {verify, verify_peer},
-                                  {fail_if_no_peer_cert, true}]}] ++ Params);
-            {_, "direct"} ->
-                make_direct_params([{node, rabbit_nodes:make(rabbit)}] ++
-                                       Params)
-        end,
-    amqp_connection:start(Params1).
-
-%% Note: not all amqp_params_network fields supported.
-make_network_params(Props) ->
-    Pgv = fun (Key, Default) ->
-                  proplists:get_value(Key, Props, Default)
-          end,
-    #amqp_params_network{username     = Pgv(username, <<"guest">>),
-                         password     = Pgv(password, <<"guest">>),
-                         virtual_host = Pgv(virtual_host, <<"/">>),
-                         channel_max  = Pgv(channel_max, 0),
-                         ssl_options  = Pgv(ssl_options, none),
-                         host         = Pgv(host, "localhost")}.
-
-%% Note: not all amqp_params_direct fields supported.
-make_direct_params(Props) ->
-    Pgv = fun (Key, Default) ->
-                  proplists:get_value(Key, Props, Default)
-          end,
-    #amqp_params_direct{username     = Pgv(username, <<"guest">>),
-                        password     = Pgv(password, <<"guest">>),
-                        virtual_host = Pgv(virtual_host, <<"/">>),
-                        node         = Pgv(node, node())}.
-
-make_cmd() ->
-    case os:getenv("MAKE") of
-        false -> "make";
-        Cmd   -> Cmd
-    end.
-
-set_resource_alarm(memory) ->
-    os:cmd(make_cmd() ++ " set-resource-alarm SOURCE=memory");
-set_resource_alarm(disk) ->
-    os:cmd(make_cmd() ++ " set-resource-alarm SOURCE=disk").
-
-clear_resource_alarm(memory) ->
-    os:cmd(make_cmd() ++ " clear-resource-alarm SOURCE=memory");
-clear_resource_alarm(disk) ->
-    os:cmd(make_cmd() ++ " clear-resource-alarm SOURCE=disk").
-
-fmt(Fmt, Args) -> list_to_binary(rabbit_misc:format(Fmt, Args)).
diff --git a/rabbitmq-server/deps/amqp_client/test/unit_SUITE.erl b/rabbitmq-server/deps/amqp_client/test/unit_SUITE.erl
new file mode 100644 (file)
index 0000000..fbc2a2d
--- /dev/null
@@ -0,0 +1,256 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+    [
+      amqp_uri_parsing,
+      route_destination_parsing
+    ].
+
+%% -------------------------------------------------------------------
+%% AMQP URI parsing.
+%% -------------------------------------------------------------------
+
+amqp_uri_parsing(_Config) ->
+    %% From the spec (adapted)
+    ?assertMatch({ok, #amqp_params_network{username     = <<"user">>,
+                                           password     = <<"pass">>,
+                                           host         = "host",
+                                           port         = 10000,
+                                           virtual_host = <<"vhost">>,
+                                           heartbeat    = 5}},
+                 amqp_uri:parse(
+                   "amqp://user:pass@host:10000/vhost?heartbeat=5")),
+    ?assertMatch({ok, #amqp_params_network{username     = <<"usera">>,
+                                           password     = <<"apass">>,
+                                           host         = "hoast",
+                                           port         = 10000,
+                                           virtual_host = <<"v/host">>}},
+                 amqp_uri:parse(
+                   "aMQp://user%61:%61pass@ho%61st:10000/v%2fhost")),
+    ?assertMatch({ok, #amqp_params_direct{}}, amqp_uri:parse("amqp://")),
+    ?assertMatch({ok, #amqp_params_direct{username     = <<"">>,
+                                          virtual_host = <<"">>}},
+                 amqp_uri:parse("amqp://:@/")),
+    ?assertMatch({ok, #amqp_params_network{username     = <<"">>,
+                                           password     = <<"">>,
+                                           virtual_host = <<"">>,
+                                           host         = "host"}},
+                 amqp_uri:parse("amqp://:@host/")),
+    ?assertMatch({ok, #amqp_params_direct{username = <<"user">>}},
+                 amqp_uri:parse("amqp://user@")),
+    ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+                                           password = <<"pass">>,
+                                           host     = "localhost"}},
+                 amqp_uri:parse("amqp://user:pass@localhost")),
+    ?assertMatch({ok, #amqp_params_network{host         = "host",
+                                           virtual_host = <<"/">>}},
+                 amqp_uri:parse("amqp://host")),
+    ?assertMatch({ok, #amqp_params_network{port = 10000,
+                                           host = "localhost"}},
+                 amqp_uri:parse("amqp://localhost:10000")),
+    ?assertMatch({ok, #amqp_params_direct{virtual_host = <<"vhost">>}},
+                 amqp_uri:parse("amqp:///vhost")),
+    ?assertMatch({ok, #amqp_params_network{host         = "host",
+                                           virtual_host = <<"">>}},
+                 amqp_uri:parse("amqp://host/")),
+    ?assertMatch({ok, #amqp_params_network{host         = "host",
+                                           virtual_host = <<"/">>}},
+                 amqp_uri:parse("amqp://host/%2f")),
+    ?assertMatch({ok, #amqp_params_network{host = "::1"}},
+                 amqp_uri:parse("amqp://[::1]")),
+
+    %% Various other cases
+    ?assertMatch({ok, #amqp_params_network{host = "host", port = 100}},
+                 amqp_uri:parse("amqp://host:100")),
+    ?assertMatch({ok, #amqp_params_network{host = "::1", port = 100}},
+                 amqp_uri:parse("amqp://[::1]:100")),
+
+    ?assertMatch({ok, #amqp_params_network{host         = "host",
+                                           virtual_host = <<"blah">>}},
+                 amqp_uri:parse("amqp://host/blah")),
+    ?assertMatch({ok, #amqp_params_network{host         = "host",
+                                           port         = 100,
+                                           virtual_host = <<"blah">>}},
+                 amqp_uri:parse("amqp://host:100/blah")),
+    ?assertMatch({ok, #amqp_params_network{host         = "::1",
+                                           virtual_host = <<"blah">>}},
+                 amqp_uri:parse("amqp://[::1]/blah")),
+    ?assertMatch({ok, #amqp_params_network{host         = "::1",
+                                           port         = 100,
+                                           virtual_host = <<"blah">>}},
+                 amqp_uri:parse("amqp://[::1]:100/blah")),
+
+    ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+                                           password = <<"pass">>,
+                                           host     = "host"}},
+                 amqp_uri:parse("amqp://user:pass@host")),
+    ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+                                           password = <<"pass">>,
+                                           port     = 100}},
+                 amqp_uri:parse("amqp://user:pass@host:100")),
+    ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+                                           password = <<"pass">>,
+                                           host     = "::1"}},
+                 amqp_uri:parse("amqp://user:pass@[::1]")),
+    ?assertMatch({ok, #amqp_params_network{username = <<"user">>,
+                                           password = <<"pass">>,
+                                           host     = "::1",
+                                           port     = 100}},
+                 amqp_uri:parse("amqp://user:pass@[::1]:100")),
+
+    %% TLS options
+    {ok, #amqp_params_network{ssl_options = TLSOpts1}} =
+        amqp_uri:parse("amqps://host/%2f?cacertfile=/path/to/cacertfile.pem"),
+    ?assertEqual(lists:usort([{cacertfile,"/path/to/cacertfile.pem"}]),
+                 lists:usort(TLSOpts1)),
+
+    {ok, #amqp_params_network{ssl_options = TLSOpts2}} =
+        amqp_uri:parse("amqps://host/%2f?cacertfile=/path/to/cacertfile.pem"
+                       "&certfile=/path/to/certfile.pem"),
+    ?assertEqual(lists:usort([{certfile,  "/path/to/certfile.pem"},
+                              {cacertfile,"/path/to/cacertfile.pem"}]),
+                 lists:usort(TLSOpts2)),
+
+    {ok, #amqp_params_network{ssl_options = TLSOpts3}} =
+        amqp_uri:parse("amqps://host/%2f?verify=verify_peer"
+                       "&fail_if_no_peer_cert=true"),
+    ?assertEqual(lists:usort([{fail_if_no_peer_cert, true},
+                              {verify,               verify_peer}
+                             ]), lists:usort(TLSOpts3)),
+
+    {ok, #amqp_params_network{ssl_options = TLSOpts4}} =
+        amqp_uri:parse("amqps://host/%2f?cacertfile=/path/to/cacertfile.pem"
+                       "&certfile=/path/to/certfile.pem"
+                       "&password=topsecret"
+                       "&depth=5"),
+    ?assertEqual(lists:usort([{certfile,  "/path/to/certfile.pem"},
+                              {cacertfile,"/path/to/cacertfile.pem"},
+                              {password,  "topsecret"},
+                              {depth,     5}]),
+                 lists:usort(TLSOpts4)),
+
+    %% Various failure cases
+    ?assertMatch({error, _}, amqp_uri:parse("http://www.rabbitmq.com")),
+    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:bar:baz")),
+    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo[::1]")),
+    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:[::1]")),
+    ?assertMatch({error, _}, amqp_uri:parse("amqp://[::1]foo")),
+    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:1000xyz")),
+    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo:1000000")),
+    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo/bar/baz")),
+
+    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%1")),
+    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%1x")),
+    ?assertMatch({error, _}, amqp_uri:parse("amqp://foo%xy")),
+
+    ok.
+
+%% -------------------------------------------------------------------
+%% Route destination parsing.
+%% -------------------------------------------------------------------
+
+route_destination_parsing(_Config) ->
+    %% valid queue
+    ?assertMatch({ok, {queue, "test"}}, parse_dest("/queue/test")),
+
+    %% valid topic
+    ?assertMatch({ok, {topic, "test"}}, parse_dest("/topic/test")),
+
+    %% valid exchange
+    ?assertMatch({ok, {exchange, {"test", undefined}}}, parse_dest("/exchange/test")),
+
+    %% valid temp queue
+    ?assertMatch({ok, {temp_queue, "test"}}, parse_dest("/temp-queue/test")),
+
+    %% valid reply queue
+    ?assertMatch({ok, {reply_queue, "test"}}, parse_dest("/reply-queue/test")),
+    ?assertMatch({ok, {reply_queue, "test/2"}}, parse_dest("/reply-queue/test/2")),
+
+    %% valid exchange with pattern
+    ?assertMatch({ok, {exchange, {"test", "pattern"}}},
+        parse_dest("/exchange/test/pattern")),
+
+    %% valid pre-declared queue
+    ?assertMatch({ok, {amqqueue, "test"}}, parse_dest("/amq/queue/test")),
+
+    %% queue without name
+    ?assertMatch({error, {invalid_destination, queue, ""}}, parse_dest("/queue")),
+    ?assertMatch({ok, {queue, undefined}}, parse_dest("/queue", true)),
+
+    %% topic without name
+    ?assertMatch({error, {invalid_destination, topic, ""}}, parse_dest("/topic")),
+
+    %% exchange without name
+    ?assertMatch({error, {invalid_destination, exchange, ""}},
+        parse_dest("/exchange")),
+
+    %% exchange default name
+    ?assertMatch({error, {invalid_destination, exchange, "//foo"}},
+        parse_dest("/exchange//foo")),
+
+    %% amqqueue without name
+    ?assertMatch({error, {invalid_destination, amqqueue, ""}},
+        parse_dest("/amq/queue")),
+
+    %% queue without name with trailing slash
+    ?assertMatch({error, {invalid_destination, queue, "/"}}, parse_dest("/queue/")),
+
+    %% topic without name with trailing slash
+    ?assertMatch({error, {invalid_destination, topic, "/"}}, parse_dest("/topic/")),
+
+    %% exchange without name with trailing slash
+    ?assertMatch({error, {invalid_destination, exchange, "/"}},
+        parse_dest("/exchange/")),
+
+    %% queue with invalid name
+    ?assertMatch({error, {invalid_destination, queue, "/foo/bar"}},
+        parse_dest("/queue/foo/bar")),
+
+    %% topic with invalid name
+    ?assertMatch({error, {invalid_destination, topic, "/foo/bar"}},
+        parse_dest("/topic/foo/bar")),
+
+    %% exchange with invalid name
+    ?assertMatch({error, {invalid_destination, exchange, "/foo/bar/baz"}},
+        parse_dest("/exchange/foo/bar/baz")),
+
+    %% unknown destination
+    ?assertMatch({error, {unknown_destination, "/blah/boo"}},
+        parse_dest("/blah/boo")),
+
+    %% queue with escaped name
+    ?assertMatch({ok, {queue, "te/st"}}, parse_dest("/queue/te%2Fst")),
+
+    %% valid exchange with escaped name and pattern
+    ?assertMatch({ok, {exchange, {"te/st", "pa/tt/ern"}}},
+        parse_dest("/exchange/te%2Fst/pa%2Ftt%2Fern")),
+
+    ok.
+
+parse_dest(Destination, Params) ->
+    rabbit_routing_util:parse_endpoint(Destination, Params).
+parse_dest(Destination) ->
+    rabbit_routing_util:parse_endpoint(Destination).
diff --git a/rabbitmq-server/deps/mochiweb/.editorconfig b/rabbitmq-server/deps/mochiweb/.editorconfig
new file mode 100644 (file)
index 0000000..d03550e
--- /dev/null
@@ -0,0 +1,17 @@
+# EditorConfig file: http://EditorConfig.org
+
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+# 4 space indentation
+[*.{erl,src,hrl}]
+indent_style = space
+indent_size = 4
index af80a19884d1b8aa9ca25584903b2a986880ee2b..1b88f921cfb157982ce30f544e779c31871377fd 100644 (file)
@@ -1,3 +1,8 @@
+Version 2.13.1 released 2016-03-13
+
+* Fix mochiweb_html regression parsing invalid charref sequences
+  https://github.com/mochi/mochiweb/issues/167
+
 Version 2.13.0 released 2016-02-08
 
 * Support parsing of UTF-16 surrogate pairs encoded as character
@@ -114,7 +119,7 @@ Version 2.7.0 released 2013-08-01
   call instead of an asynchronous cast
 * `mochiweb_html:parse_tokens/1` (and `parse/1`) will now create a
   html element to wrap documents that have a HTML5 doctype
-  (`<!doctype html>`) but no html element 
+  (`<!doctype html>`) but no html element
   https://github.com/mochi/mochiweb/issues/110
 
 Version 2.6.0 released 2013-04-15
@@ -133,7 +138,7 @@ Version 2.5.0 released 2013-03-04
   (URL and Filename safe alphabet, see RFC 4648).
 * Fix rebar.config in mochiwebapp_skel to use {branch, "master"}
   https://github.com/mochi/mochiweb/issues/105
-  
+
 Version 2.4.2 released 2013-02-05
 
 * Fixed issue in mochiweb_response introduced in v2.4.0
index 3d880dbd3d6b85b14a92fdde23ad6b1d7c032ad6..255398dc9aedb781ff95d7c9e782a19c5e182acb 100644 (file)
@@ -203,19 +203,13 @@ json_encode_proplist(Props, State) ->
     lists:reverse([$\} | Acc1]).
 
 json_encode_string(A, State) when is_atom(A) ->
-    L = atom_to_list(A),
-    case json_string_is_safe(L) of
-        true ->
-            [?Q, L, ?Q];
-        false ->
-            json_encode_string_unicode(xmerl_ucs:from_utf8(L), State, [?Q])
-    end;
+    json_encode_string(atom_to_binary(A, latin1), State);
 json_encode_string(B, State) when is_binary(B) ->
     case json_bin_is_safe(B) of
         true ->
             [?Q, B, ?Q];
         false ->
-            json_encode_string_unicode(xmerl_ucs:from_utf8(B), State, [?Q])
+            json_encode_string_unicode(unicode:characters_to_list(B), State, [?Q])
     end;
 json_encode_string(I, _State) when is_integer(I) ->
     [?Q, integer_to_list(I), ?Q];
@@ -250,7 +244,7 @@ json_string_is_safe([C | Rest]) ->
         C when C < 16#7f ->
             json_string_is_safe(Rest);
         _ ->
-            false
+            exit({json_encode, {bad_char, C}})
     end.
 
 json_bin_is_safe(<<>>) ->
@@ -308,12 +302,13 @@ json_encode_string_unicode([C | Cs], State, Acc) ->
                C when C >= 0, C < $\s ->
                    [unihex(C) | Acc];
                C when C >= 16#7f, C =< 16#10FFFF, State#encoder.utf8 ->
-                   [xmerl_ucs:to_utf8(C) | Acc];
+                   [unicode:characters_to_binary([C]) | Acc];
                C when  C >= 16#7f, C =< 16#10FFFF, not State#encoder.utf8 ->
                    [unihex(C) | Acc];
                C when C < 16#7f ->
                    [C | Acc];
                _ ->
+                   %% json_string_is_safe guarantees that this branch is dead
                    exit({json_encode, {bad_char, C}})
            end,
     json_encode_string_unicode(Cs, State, Acc1).
@@ -468,12 +463,14 @@ tokenize_string(B, S=#decoder{offset=O}, Acc) ->
                 %% coalesce UTF-16 surrogate pair
                 <<"\\u", D3, D2, D1, D0, _/binary>> = Rest,
                 D = erlang:list_to_integer([D3,D2,D1,D0], 16),
-                [CodePoint] = xmerl_ucs:from_utf16be(<<C:16/big-unsigned-integer,
-                    D:16/big-unsigned-integer>>),
-                Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
+                Acc1 = [unicode:characters_to_binary(
+                            <<C:16/big-unsigned-integer,
+                              D:16/big-unsigned-integer>>,
+                            utf16)
+                       | Acc],
                 tokenize_string(B, ?ADV_COL(S, 12), Acc1);
             true ->
-                Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc),
+                Acc1 = [unicode:characters_to_binary([C]) | Acc],
                 tokenize_string(B, ?ADV_COL(S, 6), Acc1)
             end;
         <<_:O/binary, C1, _/binary>> when C1 < 128 ->
@@ -709,13 +706,13 @@ e2j_test_vec(utf8) ->
 %% test utf8 encoding
 encoder_utf8_test() ->
     %% safe conversion case (default)
-    [34,"\\u0001","\\u0442","\\u0435","\\u0441","\\u0442",34] =
-        encode(<<1,"\321\202\320\265\321\201\321\202">>),
+    <<"\"\\u0001\\u0442\\u0435\\u0441\\u0442\"">> =
+        iolist_to_binary(encode(<<1,"\321\202\320\265\321\201\321\202">>)),
 
     %% raw utf8 output (optional)
     Enc = mochijson2:encoder([{utf8, true}]),
-    [34,"\\u0001",[209,130],[208,181],[209,129],[209,130],34] =
-        Enc(<<1,"\321\202\320\265\321\201\321\202">>).
+    <<34,"\\u0001",209,130,208,181,209,129,209,130,34>> =
+        iolist_to_binary(Enc(<<1,"\321\202\320\265\321\201\321\202">>)).
 
 input_validation_test() ->
     Good = [
@@ -724,7 +721,7 @@ input_validation_test() ->
         {16#10196, <<?Q, 16#F0, 16#90, 16#86, 16#96, ?Q>>} %% denarius
     ],
     lists:foreach(fun({CodePoint, UTF8}) ->
-        Expect = list_to_binary(xmerl_ucs:to_utf8(CodePoint)),
+        Expect = unicode:characters_to_binary([CodePoint]),
         Expect = decode(UTF8)
     end, Good),
 
@@ -759,7 +756,7 @@ inline_json_test() ->
     ok.
 
 big_unicode_test() ->
-    UTF8Seq = list_to_binary(xmerl_ucs:to_utf8(16#0001d120)),
+    UTF8Seq = unicode:characters_to_binary([16#0001d120]),
     ?assertEqual(
        <<"\"\\ud834\\udd20\"">>,
        iolist_to_binary(encode(UTF8Seq))),
@@ -791,7 +788,10 @@ atom_test() ->
        iolist_to_binary(encode(foo))),
     ?assertEqual(
        <<"\"\\ud834\\udd20\"">>,
-       iolist_to_binary(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))),
+       iolist_to_binary(
+         encode(
+           binary_to_atom(
+             unicode:characters_to_binary([16#0001d120]), latin1)))),
     ok.
 
 key_encode_test() ->
@@ -836,18 +836,21 @@ unsafe_chars_test() ->
        json_string_is_safe([16#0001d120])),
     ?assertEqual(
        false,
-       json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8(16#0001d120)))),
+       json_bin_is_safe(unicode:characters_to_binary([16#0001d120]))),
     ?assertEqual(
        [16#0001d120],
-       xmerl_ucs:from_utf8(
-         binary_to_list(
-           decode(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))))),
+       unicode:characters_to_list(
+         decode(
+           encode(
+             binary_to_atom(
+               unicode:characters_to_binary([16#0001d120]),
+               latin1))))),
     ?assertEqual(
        false,
-       json_string_is_safe([16#110000])),
+       json_string_is_safe([16#10ffff])),
     ?assertEqual(
        false,
-       json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8([16#110000])))),
+       json_bin_is_safe(unicode:characters_to_binary([16#10ffff]))),
     %% solidus can be escaped but isn't unsafe by default
     ?assertEqual(
        <<"/">>,
@@ -904,4 +907,36 @@ format_test_() ->
                    {eep18, {P}},
                    {proplist, P}]].
 
+array_test() ->
+    A = [<<"hello">>],
+    ?assertEqual(A, decode(encode({array, A}))).
+
+bad_char_test() ->
+    ?assertEqual(
+       {'EXIT', {json_encode, {bad_char, 16#110000}}},
+       catch json_string_is_safe([16#110000])).
+
+utf8_roundtrip_test_() ->
+    %% These are the boundary cases for UTF8 encoding
+    Codepoints = [%% 7 bits  -> 1 byte
+                  16#00, 16#7f,
+                  %% 11 bits -> 2 bytes
+                  16#080, 16#07ff,
+                  %% 16 bits -> 3 bytes
+                  16#0800, 16#ffff,
+                  16#d7ff, 16#e000,
+                  %% 21 bits -> 4 bytes
+                  16#010000, 16#10ffff],
+    UTF8 = unicode:characters_to_binary(Codepoints),
+    Encode = encoder([{utf8, true}]),
+    [{"roundtrip escaped",
+      ?_assertEqual(UTF8, decode(encode(UTF8)))},
+     {"roundtrip utf8",
+      ?_assertEqual(UTF8, decode(Encode(UTF8)))}].
+
+utf8_non_character_test_() ->
+    S = unicode:characters_to_binary([16#ffff, 16#fffe]),
+    [{"roundtrip escaped", ?_assertEqual(S, decode(encode(S)))},
+     {"roundtrip utf8", ?_assertEqual(S, decode((encoder([{utf8, true}]))(S)))}].
+
 -endif.
index f20c719d5109fab13ac486f88247f558a121f704..30fa905c263c9683693db8090d2682f88c5617f6 100644 (file)
@@ -1,6 +1,6 @@
 {application,mochiweb,
              [{description,"MochiMedia Web Server"},
-              {vsn,"2.13.0"},
+              {vsn,"2.13.1"},
               {modules,[]},
               {registered,[]},
               {env,[]},
index 3c5c4f91e990cb98def772b388204bd5d825ae68..70723af4f140fe5f22edffd612ac3e4d82d6b331 100644 (file)
@@ -640,7 +640,7 @@ find_gt(Bin, S=#decoder{offset=O}, HasSlash) ->
 tokenize_charref(Bin, S=#decoder{offset=O}) ->
     try
         case tokenize_charref_raw(Bin, S, O) of
-            {C1, S1=#decoder{offset=O1}} when C1 >= 16#D800 andalso C1 =< 16#DFFF ->
+            {C1, S1} when C1 >= 16#D800 andalso C1 =< 16#DFFF ->
                 %% Surrogate pair
                 tokeninize_charref_surrogate_pair(Bin, S1, C1);
             {Unichar, S1} when is_integer(Unichar) ->
@@ -648,7 +648,9 @@ tokenize_charref(Bin, S=#decoder{offset=O}) ->
                  S1};
             {Unichars, S1} when is_list(Unichars) ->
                 {{data, unicode:characters_to_binary(Unichars), false},
-                 S1}
+                 S1};
+            {undefined, _} ->
+                throw(invalid_charref)
         end
     catch
         throw:invalid_charref ->
index f67759a1cf52b6dcae9c2b8f34d7501e666231c6..0310b289f8ff589175257a59abbdec716bceb82d 100644 (file)
@@ -562,7 +562,15 @@ parse_amp_test_() ->
      ?_assertEqual(
         {<<"html">>,[],
          [{<<"body">>,[],[<<"&">>]}]},
-        mochiweb_html:parse("<html><body>&</body></html>"))].
+        mochiweb_html:parse("<html><body>&</body></html>")),
+     ?_assertEqual(
+        {<<"html">>,[],
+         [{<<"body">>,[],[<<"&;">>]}]},
+        mochiweb_html:parse("<html><body>&;</body></html>")),
+     ?_assertEqual(
+        {<<"html">>,[],
+         [{<<"body">>,[],[<<"&MISSING;">>]}]},
+        mochiweb_html:parse("<html><body>&MISSING;</body></html>"))].
 
 parse_unescaped_lt_test() ->
     D1 = <<"<div> < < <a href=\"/\">Back</a></div>">>,
diff --git a/rabbitmq-server/deps/rabbit_common/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbit_common/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/rabbitmq-server/deps/rabbit_common/CONTRIBUTING.md b/rabbitmq-server/deps/rabbit_common/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..45bbcbe
--- /dev/null
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place to discuss code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 44b9566226d426f910fa459fdfac20907958eaff..4b8a7de7b3e7d4c92abbe4439effec9a8f8b7a7f 100644 (file)
@@ -20,22 +20,19 @@ ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include mk/rabbitmq-components.mk
 include erlang.mk
+include mk/rabbitmq-build.mk
 include mk/rabbitmq-dist.mk
 
 # --------------------------------------------------------------------
 # Compilation.
 # --------------------------------------------------------------------
 
-ERTS_VER := $(shell erl -version 2>&1 | sed -E 's/.* version //')
+# $(ERTS_VER) is set in `rabbitmq-build.mk` above.
 tls_atom_version_MAX_ERTS_VER = 6.0
 ifeq ($(call compare_version,$(ERTS_VER),$(tls_atom_version_MAX_ERTS_VER),<),true)
 RMQ_ERLC_OPTS += -Ddefine_tls_atom_version
 endif
 
-ERLC_OPTS += $(RMQ_ERLC_OPTS)
-
-TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS)
-
 # --------------------------------------------------------------------
 # Framing sources generation.
 # --------------------------------------------------------------------
index e2468c20570bb84a171972384e8d7ea93371b136..a0300ed892469d29225fb0431e3977c4cdf86227 100755 (executable)
@@ -95,7 +95,7 @@ def prettyType(typeName, subTypes, typesPerLine = 4):
     sTs = multiLineFormat(subTypes,
                           "( ", " | ", "\n       | ", " )",
                           thingsPerLine = typesPerLine)
-    return "-type(%s ::\n       %s)." % (typeName, sTs)
+    return "-type %s ::\n       %s." % (typeName, sTs)
 
 def printFileHeader():
     print("""%%   Autogenerated code. Do not edit.
@@ -348,7 +348,6 @@ def genErl(spec):
 
 """)
     print("%% Various types")
-    print("-ifdef(use_specs).")
 
     print("""-export_type([amqp_field_type/0, amqp_property_type/0,
               amqp_table/0, amqp_array/0, amqp_value/0,
@@ -356,18 +355,18 @@ def genErl(spec):
               amqp_method_field_name/0, amqp_property_record/0,
               amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
 
--type(amqp_field_type() ::
+-type amqp_field_type() ::
       'longstr' | 'signedint' | 'decimal' | 'timestamp' |
       'unsignedbyte' | 'unsignedshort' | 'unsignedint' |
       'table' | 'byte' | 'double' | 'float' | 'long' |
-      'short' | 'bool' | 'binary' | 'void' | 'array').
--type(amqp_property_type() ::
+      'short' | 'bool' | 'binary' | 'void' | 'array'.
+-type amqp_property_type() ::
       'shortstr' | 'longstr' | 'octet' | 'short' | 'long' |
-      'longlong' | 'timestamp' | 'bit' | 'table').
+      'longlong' | 'timestamp' | 'bit' | 'table'.
 
--type(amqp_table() :: [{binary(), amqp_field_type(), amqp_value()}]).
--type(amqp_array() :: [{amqp_field_type(), amqp_value()}]).
--type(amqp_value() :: binary() |    % longstr
+-type amqp_table() :: [{binary(), amqp_field_type(), amqp_value()}].
+-type amqp_array() :: [{amqp_field_type(), amqp_value()}].
+-type amqp_value() :: binary() |    % longstr
                       integer() |   % signedint
                       {non_neg_integer(), non_neg_integer()} | % decimal
                       amqp_table() |
@@ -379,8 +378,7 @@ def genErl(spec):
                       boolean() |   % bool
                       binary() |    % binary
                       'undefined' | % void
-                      non_neg_integer() % timestamp
-     ).
+                      non_neg_integer(). % timestamp
 """)
 
     print(prettyType("amqp_method_name()",
@@ -410,27 +408,25 @@ def genErl(spec):
                      ["%i" % ci for ci in classIds]))
     print(prettyType("amqp_class_name()",
                      ["%s" % c.erlangName() for c in spec.allClasses()]))
-    print("-endif. % use_specs")
 
     print("""
 %% Method signatures
--ifdef(use_specs).
--spec(version/0 :: () -> {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
--spec(lookup_method_name/1 :: (amqp_method()) -> amqp_method_name()).
--spec(lookup_class_name/1 :: (amqp_class_id()) -> amqp_class_name()).
--spec(method_id/1 :: (amqp_method_name()) -> amqp_method()).
--spec(method_has_content/1 :: (amqp_method_name()) -> boolean()).
--spec(is_method_synchronous/1 :: (amqp_method_record()) -> boolean()).
--spec(method_record/1 :: (amqp_method_name()) -> amqp_method_record()).
--spec(method_fieldnames/1 :: (amqp_method_name()) -> [amqp_method_field_name()]).
--spec(decode_method_fields/2 ::
-        (amqp_method_name(), binary()) -> amqp_method_record() | rabbit_types:connection_exit()).
--spec(decode_properties/2 :: (non_neg_integer(), binary()) -> amqp_property_record()).
--spec(encode_method_fields/1 :: (amqp_method_record()) -> binary()).
--spec(encode_properties/1 :: (amqp_property_record()) -> binary()).
--spec(lookup_amqp_exception/1 :: (amqp_exception()) -> {boolean(), amqp_exception_code(), binary()}).
--spec(amqp_exception/1 :: (amqp_exception_code()) -> amqp_exception()).
--endif. % use_specs
+-spec version() -> {non_neg_integer(), non_neg_integer(), non_neg_integer()}.
+-spec lookup_method_name(amqp_method()) -> amqp_method_name().
+-spec lookup_class_name(amqp_class_id()) -> amqp_class_name().
+-spec method_id(amqp_method_name()) -> amqp_method().
+-spec method_has_content(amqp_method_name()) -> boolean().
+-spec is_method_synchronous(amqp_method_record()) -> boolean().
+-spec method_record(amqp_method_name()) -> amqp_method_record().
+-spec method_fieldnames(amqp_method_name()) -> [amqp_method_field_name()].
+-spec decode_method_fields(amqp_method_name(), binary()) ->
+          amqp_method_record() | rabbit_types:connection_exit().
+-spec decode_properties(non_neg_integer(), binary()) -> amqp_property_record().
+-spec encode_method_fields(amqp_method_record()) -> binary().
+-spec encode_properties(amqp_property_record()) -> binary().
+-spec lookup_amqp_exception(amqp_exception()) ->
+          {boolean(), amqp_exception_code(), binary()}.
+-spec amqp_exception(amqp_exception_code()) -> amqp_exception().
 
 bitvalue(true) -> 1;
 bitvalue(false) -> 0;
diff --git a/rabbitmq-server/deps/rabbit_common/include/old_builtin_types.hrl b/rabbitmq-server/deps/rabbit_common/include/old_builtin_types.hrl
new file mode 100644 (file)
index 0000000..db3666b
--- /dev/null
@@ -0,0 +1,30 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+%% Old builtin types found in Erlang R16B03.
+-ifdef(use_old_builtin_types).
+-define(ARRAY_TYPE, array).
+-define(DICT_TYPE, dict).
+-define(GB_SET_TYPE, gb_set).
+-define(QUEUE_TYPE, queue).
+-define(SET_TYPE, set).
+-else.
+-define(ARRAY_TYPE, array:array).
+-define(DICT_TYPE, dict:dict).
+-define(GB_SET_TYPE, gb_sets:set).
+-define(QUEUE_TYPE, queue:queue).
+-define(SET_TYPE, sets:set).
+-endif.
index 40193be1a596a71e2832a59267227d6b7d7d8b26..85f18629f3b0c3ec4ee69b7803d7f05fe0a1a5b7 100644 (file)
@@ -14,6 +14,8 @@
 %% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
+-include("old_builtin_types.hrl").
+
 %% Passed around most places
 -record(user, {username,
                tags,
 -define(EMPTY_FRAME_SIZE, 8).
 
 -define(MAX_WAIT, 16#ffffffff).
+-define(SUPERVISOR_WAIT, infinity).
+-define(WORKER_WAIT, 30000).
 
 -define(HIBERNATE_AFTER_MIN,        1000).
 -define(DESIRED_HIBERNATE,         10000).
similarity index 53%
rename from rabbitmq-server/deps/rabbitmq_event_exchange/test/src/rabbit_exchange_type_event_test_all.erl
rename to rabbitmq-server/deps/rabbit_common/include/rabbit_misc.hrl
index 278b9e01b99ccd2666335885ec9845fa85819fb6..26dce4ba842e00474a7126e55c9ac00032528474 100644 (file)
@@ -8,22 +8,10 @@
 %% the License for the specific language governing rights and
 %% limitations under the License.
 %%
-%% The Original Code is RabbitMQ Consistent Hash Exchange.
+%% The Original Code is RabbitMQ.
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_exchange_type_event_test_all).
-
--export([all_tests/0]).
-
-all_tests() ->
-    ok = eunit:test(tests(rabbit_exchange_type_event_unit_test, 60), [verbose]),
-    ok = eunit:test(tests(rabbit_exchange_type_event_test, 60), [verbose]).
-
-tests(Module, Timeout) ->
-    {foreach, fun() -> ok end,
-     [{timeout, Timeout, fun () -> Module:F() end} ||
-         {F, _Arity} <- proplists:get_value(exports, Module:module_info()),
-         string:right(atom_to_list(F), 5) =:= "_test"]}.
+-define(RPC_TIMEOUT, infinity).
index 8bcf2ce6298b6ea5bc5056e061b0edfbf75ba761..8cf830cf803a4f843e6406638ea5451e3363e460 100644 (file)
 
 -include("rabbit.hrl").
 
--ifdef(use_specs).
-
 -type(msg() :: any()).
 
--endif.
-
 -record(msg_location, {msg_id, ref_count, file, offset, total_size}).
diff --git a/rabbitmq-server/deps/rabbit_common/mk/rabbitmq-build.mk b/rabbitmq-server/deps/rabbit_common/mk/rabbitmq-build.mk
new file mode 100644 (file)
index 0000000..02842ae
--- /dev/null
@@ -0,0 +1,68 @@
+# --------------------------------------------------------------------
+# Compiler flags.
+# --------------------------------------------------------------------
+
+# FIXME: We copy Erlang.mk default flags here: rabbitmq-build.mk is
+# loaded as a plugin, so before those variables are defined. And because
+# Erlang.mk uses '?=', the flags we set here override the default set.
+
+WARNING_OPTS += +debug_info \
+               +warn_export_vars \
+               +warn_shadow_vars \
+               +warn_obsolete_guard
+ERLC_OPTS += -Werror $(WARNING_OPTS)
+TEST_ERLC_OPTS += $(WARNING_OPTS)
+
+define compare_version
+$(shell awk 'BEGIN {
+       split("$(1)", v1, ".");
+       version1 = v1[1] * 1000000 + v1[2] * 10000 + v1[3] * 100 + v1[4];
+
+       split("$(2)", v2, ".");
+       version2 = v2[1] * 1000000 + v2[2] * 10000 + v2[3] * 100 + v2[4];
+
+       if (version1 $(3) version2) {
+               print "true";
+       } else {
+               print "false";
+       }
+}')
+endef
+
+# Erlang R16B03 has no support for new types in Erlang 17.0, leading to
+# a build-time error.
+ERTS_VER := $(shell erl -version 2>&1 | sed -E 's/.* version //')
+old_builtin_types_MAX_ERTS_VER = 6.0
+ifeq ($(call compare_version,$(ERTS_VER),$(old_builtin_types_MAX_ERTS_VER),<),true)
+RMQ_ERLC_OPTS += -Duse_old_builtin_types
+endif
+
+# Push our compilation options to both the normal and test ERLC_OPTS.
+ERLC_OPTS += $(RMQ_ERLC_OPTS)
+TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS)
+
+# --------------------------------------------------------------------
+# Common test flags.
+# --------------------------------------------------------------------
+
+# Disable most messages on Travis and Concourse.
+#
+# Concourse doesn't set any environment variables to help us automate
+# things. In rabbitmq-ci, we run tests under the `concourse` user, so
+# we check for that instead.
+CT_QUIET_FLAGS = -verbosity 50 \
+                -erl_args \
+                -kernel error_logger silent
+ifdef TRAVIS
+CT_OPTS += $(CT_QUIET_FLAGS)
+endif
+ifdef CONCOURSE
+CT_OPTS += $(CT_QUIET_FLAGS)
+endif
+
+# Enable JUnit-like report on Jenkins. Jenkins parses those reports so
+# the results can be browsed from its UI. Furthermore, it displays a
+# graph showing evolution of the results over time.
+ifdef JENKINS_HOME
+CT_OPTS += -ct_hooks cth_surefire
+endif
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 52153c1cc6bbeb50e7f59102b51c6374554f12f2..b044606fd3322f232c389de43596c6840cd25767 100644 (file)
@@ -22,24 +22,6 @@ $(shell awk '
 }' $(1))
 endef
 
-# Our type specs rely on dict:dict/0 etc, which are only available in
-# 17.0 upwards.
-define compare_version
-$(shell awk 'BEGIN {
-       split("$(1)", v1, ".");
-       version1 = v1[1] * 1000000 + v1[2] * 10000 + v1[3] * 100 + v1[4];
-
-       split("$(2)", v2, ".");
-       version2 = v2[1] * 1000000 + v2[2] * 10000 + v2[3] * 100 + v2[4];
-
-       if (version1 $(3) version2) {
-               print "true";
-       } else {
-               print "false";
-       }
-}')
-endef
-
 # Define the target to create an .ez plugin archive. This macro is
 # called like this:
 #
index 6b0e5e5b99ef8d93720ab5d3c1242e30911f47c2..906ed029ddfb13eda8a36e12de0286f2f1369741 100644 (file)
@@ -1,3 +1,7 @@
+ifeq ($(filter rabbitmq-build.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-build.mk
+endif
+
 ifeq ($(filter rabbitmq-dist.mk,$(notdir $(MAKEFILE_LIST))),)
 include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-dist.mk
 endif
index 72ce8b04b349d2383862f73b1e83118bda880bb3..db7927277582c337e134b7d6ad40154b3a117d43 100644 (file)
@@ -39,6 +39,9 @@ endif
 
 export RABBITMQ_SCRIPTS_DIR RABBITMQCTL RABBITMQ_PLUGINS RABBITMQ_SERVER
 
+# We export MAKE to be sure scripts and tests use the proper command.
+export MAKE
+
 # We need to pass the location of codegen to the Java client ant
 # process.
 CODEGEN_DIR = $(DEPS_DIR)/rabbitmq_codegen
@@ -61,18 +64,21 @@ node_tmpdir = $(TEST_TMPDIR)/$(1)
 node_pid_file = $(call node_tmpdir,$(1))/$(1).pid
 node_log_base = $(call node_tmpdir,$(1))/log
 node_mnesia_base = $(call node_tmpdir,$(1))/mnesia
+node_mnesia_dir = $(call node_mnesia_base,$(1))/$(1)
 node_plugins_expand_dir = $(call node_tmpdir,$(1))/plugins
 node_enabled_plugins_file = $(call node_tmpdir,$(1))/enabled_plugins
 
 # Broker startup variables for the test environment.
 RABBITMQ_NODENAME ?= rabbit
-NODE_TMPDIR ?= $(call node_tmpdir,$(RABBITMQ_NODENAME))
+RABBITMQ_NODENAME_FOR_PATHS ?= $(RABBITMQ_NODENAME)
+NODE_TMPDIR ?= $(call node_tmpdir,$(RABBITMQ_NODENAME_FOR_PATHS))
 
-RABBITMQ_PID_FILE ?= $(call node_pid_file,$(RABBITMQ_NODENAME))
-RABBITMQ_LOG_BASE ?= $(call node_log_base,$(RABBITMQ_NODENAME))
-RABBITMQ_MNESIA_BASE ?= $(call node_mnesia_base,$(RABBITMQ_NODENAME))
-RABBITMQ_PLUGINS_EXPAND_DIR ?= $(call node_plugins_expand_dir,$(RABBITMQ_NODENAME))
-RABBITMQ_ENABLED_PLUGINS_FILE ?= $(call node_enabled_plugins_file,$(RABBITMQ_NODENAME))
+RABBITMQ_PID_FILE ?= $(call node_pid_file,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_LOG_BASE ?= $(call node_log_base,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_MNESIA_BASE ?= $(call node_mnesia_base,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_MNESIA_DIR ?= $(call node_mnesia_dir,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_PLUGINS_EXPAND_DIR ?= $(call node_plugins_expand_dir,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_ENABLED_PLUGINS_FILE ?= $(call node_enabled_plugins_file,$(RABBITMQ_NODENAME_FOR_PATHS))
 
 # erlang.mk adds dependencies' ebin directory to ERL_LIBS. This is
 # a sane default, but we prefer to rely on the .ez archives in the
@@ -85,17 +91,19 @@ MAKE="$(MAKE)" \
 ERL_LIBS="$(DIST_ERL_LIBS)" \
 RABBITMQ_NODENAME="$(1)" \
 RABBITMQ_NODE_IP_ADDRESS="$(RABBITMQ_NODE_IP_ADDRESS)" \
-RABBITMQ_NODE_PORT="$(2)" \
-RABBITMQ_PID_FILE="$(call node_pid_file,$(1))" \
-RABBITMQ_LOG_BASE="$(call node_log_base,$(1))" \
-RABBITMQ_MNESIA_BASE="$(call node_mnesia_base,$(1))" \
+RABBITMQ_NODE_PORT="$(3)" \
+RABBITMQ_PID_FILE="$(call node_pid_file,$(2))" \
+RABBITMQ_LOG_BASE="$(call node_log_base,$(2))" \
+RABBITMQ_MNESIA_BASE="$(call node_mnesia_base,$(2))" \
+RABBITMQ_MNESIA_DIR="$(call node_mnesia_dir,$(2))" \
 RABBITMQ_PLUGINS_DIR="$(CURDIR)/$(DIST_DIR)" \
-RABBITMQ_PLUGINS_EXPAND_DIR="$(call node_plugins_expand_dir,$(1))" \
+RABBITMQ_PLUGINS_EXPAND_DIR="$(call node_plugins_expand_dir,$(2))" \
 RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)"
 endef
 
-BASIC_SCRIPT_ENV_SETTINGS = $(call basic_script_env_settings,$(RABBITMQ_NODENAME),$(RABBITMQ_NODE_PORT)) \
-                           RABBITMQ_ENABLED_PLUGINS_FILE="$(RABBITMQ_ENABLED_PLUGINS_FILE)"
+BASIC_SCRIPT_ENV_SETTINGS = \
+       $(call basic_script_env_settings,$(RABBITMQ_NODENAME),$(RABBITMQ_NODENAME_FOR_PATHS),$(RABBITMQ_NODE_PORT)) \
+       RABBITMQ_ENABLED_PLUGINS_FILE="$(RABBITMQ_ENABLED_PLUGINS_FILE)"
 
 # NOTE: Running a plugin requires RabbitMQ itself. As this file is
 # loaded *after* erlang.mk, it is too late to add "rabbit" to the
@@ -144,9 +152,63 @@ $(RABBITMQ_ENABLED_PLUGINS_FILE): node-tmpdir
 # Run a full RabbitMQ.
 # --------------------------------------------------------------------
 
-run-broker: node-tmpdir $(RABBITMQ_ENABLED_PLUGINS_FILE)
+define test_rabbitmq_config
+%% vim:ft=erlang:
+
+[
+  {rabbit, [
+      {loopback_users, []}
+    ]}
+].
+endef
+
+define test_rabbitmq_config_with_tls
+%% vim:ft=erlang:
+
+[
+  {rabbit, [
+      {loopback_users, []},
+      {ssl_listeners, [5671]},
+      {ssl_options, [
+          {cacertfile, "$(TEST_TLS_CERTS_DIR_in_config)/testca/cacert.pem"},
+          {certfile,   "$(TEST_TLS_CERTS_DIR_in_config)/server/cert.pem"},
+          {keyfile,    "$(TEST_TLS_CERTS_DIR_in_config)/server/key.pem"},
+          {verify, verify_peer},
+          {fail_if_no_peer_cert, false},
+          {honor_cipher_order, true}]}
+    ]}
+].
+endef
+
+TEST_CONFIG_FILE ?= $(TEST_TMPDIR)/test.config
+TEST_TLS_CERTS_DIR = $(TEST_TMPDIR)/tls-certs
+ifeq ($(PLATFORM),msys2)
+TEST_TLS_CERTS_DIR_in_config = $(shell echo $(TEST_TLS_CERTS_DIR) | sed -E "s,^/([^/]+),\1:,")
+else
+TEST_TLS_CERTS_DIR_in_config = $(TEST_TLS_CERTS_DIR)
+endif
+
+.PHONY: $(TEST_CONFIG_FILE)
+$(TEST_CONFIG_FILE): node-tmpdir
+       $(gen_verbose) printf "$(subst $(newline),\n,$(subst ",\",$(config)))" > $@
+
+$(TEST_TLS_CERTS_DIR): node-tmpdir
+       $(gen_verbose) $(MAKE) -C $(DEPS_DIR)/rabbit_common/tools/tls-certs \
+               DIR=$(TEST_TLS_CERTS_DIR) all
+
+show-test-tls-certs-dir: $(TEST_TLS_CERTS_DIR)
+       @echo $(TEST_TLS_CERTS_DIR)
+
+run-broker run-tls-broker: RABBITMQ_CONFIG_FILE = $(basename $(TEST_CONFIG_FILE))
+run-broker:     config := $(test_rabbitmq_config)
+run-tls-broker: config := $(test_rabbitmq_config_with_tls)
+run-tls-broker: $(TEST_TLS_CERTS_DIR)
+
+run-broker run-tls-broker: node-tmpdir $(RABBITMQ_ENABLED_PLUGINS_FILE) \
+    $(TEST_CONFIG_FILE)
        $(BASIC_SCRIPT_ENV_SETTINGS) \
          RABBITMQ_ALLOW_INPUT=true \
+         RABBITMQ_CONFIG_FILE=$(RABBITMQ_CONFIG_FILE) \
          $(RABBITMQ_SERVER)
 
 run-background-broker: node-tmpdir $(RABBITMQ_ENABLED_PLUGINS_FILE)
@@ -205,6 +267,15 @@ start-background-node: node-tmpdir $(RABBITMQ_ENABLED_PLUGINS_FILE)
        ERL_LIBS="$(DIST_ERL_LIBS)" \
          $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) wait $(RABBITMQ_PID_FILE) kernel
 
+start-background-broker: node-tmpdir $(RABBITMQ_ENABLED_PLUGINS_FILE)
+       $(BASIC_SCRIPT_ENV_SETTINGS) \
+         $(RABBITMQ_SERVER) \
+         $(REDIRECT_STDIO) &
+       ERL_LIBS="$(DIST_ERL_LIBS)" \
+         $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) wait $(RABBITMQ_PID_FILE) && \
+       ERL_LIBS="$(DIST_ERL_LIBS)" \
+         $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) status >/dev/null
+
 start-rabbit-on-node:
        $(exec_verbose) echo 'rabbit:start().' | $(ERL_CALL) $(ERL_CALL_OPTS) | sed -E '/^\{ok, ok\}$$/d'
        $(verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
@@ -229,7 +300,7 @@ stop-node:
        ) || :
 
 clean-node-db:
-       $(exec_verbose) rm -rf $(RABBITMQ_MNESIA_BASE)/$(RABBITMQ_NODENAME)/*
+       $(exec_verbose) rm -rf $(RABBITMQ_MNESIA_DIR)/*
 
 start-cover:
        $(exec_verbose) echo "rabbit_misc:start_cover([\"rabbit\", \"hare\"])." | $(ERL_CALL) $(ERL_CALL_OPTS) | sed -E '/^\{ok, ok\}$$/d'
@@ -254,7 +325,7 @@ virgin-other-node-tmpdir:
                $(call node_plugins_expand_dir,$(OTHER_NODE))
 
 start-other-node: other-node-tmpdir
-       $(exec_verbose) $(call basic_script_env_settings,$(OTHER_NODE),$(OTHER_PORT)) \
+       $(exec_verbose) $(call basic_script_env_settings,$(OTHER_NODE),$(OTHER_NODE),$(OTHER_PORT)) \
        RABBITMQ_ENABLED_PLUGINS_FILE="$(if $(OTHER_PLUGINS),$(OTHER_PLUGINS),$($(call node_enabled_plugins_file,$(OTHER_NODE))))" \
        RABBITMQ_CONFIG_FILE="$(CURDIR)/etc/$(if $(OTHER_CONFIG),$(OTHER_CONFIG),$(OTHER_NODE))" \
        RABBITMQ_NODE_ONLY='' \
index 06e4048f0f54ba160ec85b96eec8ce7ce993a5c0..c83a8422d9499eb1b00c601e9dd6669d8532ba4e 100644 (file)
@@ -25,6 +25,12 @@ update-rabbitmq-components-mk: rabbitmq-components-mk
                || $(MAKE) -C $$repo rabbitmq-components-mk; \
        done
 
+update-contributor-code-of-conduct:
+       $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \
+               cp $(DEPS_DIR)/rabbit_common/CODE_OF_CONDUCT.md $$repo/CODE_OF_CONDUCT.md; \
+               cp $(DEPS_DIR)/rabbit_common/CONTRIBUTING.md $$repo/CONTRIBUTING.md; \
+       done
+
 ifneq ($(wildcard .git),)
 
 .PHONY: sync-gitremote sync-gituser
index 748f285af94347ef50f257943a491416da36ce68..6504c3f543ac9bc569cd023c303770254f2fb5d7 100644 (file)
@@ -19,8 +19,6 @@
          stop_applications/1, stop_applications/2, app_dependency_order/2,
          app_dependencies/1]).
 
--ifdef(use_specs).
-
 -type error_handler() :: fun((atom(), any()) -> 'ok').
 
 -spec load_applications([atom()])                   -> 'ok'.
@@ -31,8 +29,6 @@
 -spec app_dependency_order([atom()], boolean())     -> [digraph:vertex()].
 -spec app_dependencies(atom())                      -> [atom()].
 
--endif.
-
 %%---------------------------------------------------------------------------
 %% Public API
 
diff --git a/rabbitmq-server/deps/rabbit_common/src/code_version.erl b/rabbitmq-server/deps/rabbit_common/src/code_version.erl
new file mode 100644 (file)
index 0000000..c6657d8
--- /dev/null
@@ -0,0 +1,258 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+-module(code_version).
+
+-export([update/1]).
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+%%----------------------------------------------------------------------------
+%% @doc Reads the abstract code of the given `Module`, modifies it to adapt to
+%% the current Erlang version, compiles and loads the result.
+%% This function finds the current Erlang version and then selects the function
+%% call for that version, removing all other versions declared in the original
+%% beam file. `code_version:update/1` is triggered by the module itself the
+%% first time an affected function is called.
+%%
+%% The purpose of this functionality is to support the new time API introduced
+%% in ERTS 7.0, while providing compatibility with previous versions.
+%%
+%% `Module` must contain an attribute `erlang_version_support` containing a list of
+%% tuples:
+%%
+%% {ErlangVersion, [{OriginalFunction, Arity, PreErlangVersionFunction,
+%%                   PostErlangVersionFunction}]}
+%%
+%% All these new functions may be exported, and implemented as follows:
+%%
+%% OriginalFunction() ->
+%%    code_version:update(?MODULE),
+%%    ?MODULE:OriginalFunction().
+%%
+%% PostErlangVersionFunction() ->
+%%    %% implementation using new time API
+%%    ..
+%%
+%% PreErlangVersionFunction() ->
+%%    %% implementation using fallback solution
+%%    ..
+%%
+%% See `time_compat.erl` for an example.
+%%
+%% end
+%%----------------------------------------------------------------------------
+-spec update(atom()) -> ok | no_return().
+update(Module) ->
+    AbsCode = get_abs_code(Module),
+    Forms = replace_forms(Module, get_otp_version(), AbsCode),
+    Code = compile_forms(Forms),
+    load_code(Module, Code).
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+load_code(Module, Code) ->
+    LockId = {{?MODULE, Module}, self()},
+    FakeFilename = "Loaded by rabbit_common",
+    global:set_lock(LockId, [node()]),
+    case code:which(Module) of
+        FakeFilename ->
+            ok;
+        _ ->
+            unload(Module),
+            case code:load_binary(Module, FakeFilename, Code) of
+                {module, _}     -> ok;
+                {error, Reason} -> throw({cannot_load, Module, Reason})
+            end
+    end,
+    global:del_lock(LockId, [node()]),
+    ok.
+
+unload(Module) ->
+    code:soft_purge(Module),
+    code:delete(Module).
+
+compile_forms(Forms) ->
+    case compile:forms(Forms, [debug_info]) of
+        {ok, _ModName, Code} ->
+            Code;
+        {ok, _ModName, Code, _Warnings} ->
+            Code;
+        Error ->
+            throw({cannot_compile_forms, Error})
+    end.
+
+get_abs_code(Module) ->
+    get_forms(get_object_code(Module)).
+
+get_object_code(Module) ->
+    case code:get_object_code(Module) of
+        {_Mod, Code, _File} ->
+            Code;
+        error ->
+            throw({not_found, Module})
+    end.
+
+get_forms(Code) ->
+    case beam_lib:chunks(Code, [abstract_code]) of
+        {ok, {_, [{abstract_code, {raw_abstract_v1, Forms}}]}} ->
+            Forms;
+        {ok, {Module, [{abstract_code, no_abstract_code}]}} ->
+            throw({no_abstract_code, Module});
+        {error, beam_lib, Reason} ->
+            throw({no_abstract_code, Reason})
+    end.
+
+get_otp_version() ->
+    Version = erlang:system_info(otp_release),
+    case re:run(Version, "^[0-9][0-9]", [{capture, first, list}]) of
+        {match, [V]} ->
+            list_to_integer(V);
+        _ ->
+            %% Could be anything below R17, we are not interested
+            0
+    end.
+
+get_original_pairs(VersionSupport) ->
+    [{Orig, Arity} || {Orig, Arity, _Pre, _Post} <- VersionSupport].
+
+get_delete_pairs(true, VersionSupport) ->
+    [{Pre, Arity} || {_Orig, Arity, Pre, _Post} <- VersionSupport];
+get_delete_pairs(false, VersionSupport) ->
+    [{Post, Arity} || {_Orig, Arity, _Pre, Post} <- VersionSupport].
+
+get_rename_pairs(true, VersionSupport) ->
+    [{Post, Arity} || {_Orig, Arity, _Pre, Post} <- VersionSupport];
+get_rename_pairs(false, VersionSupport) ->
+    [{Pre, Arity} || {_Orig, Arity, Pre, _Post} <- VersionSupport].
+
+%% Pairs of {Renamed, OriginalName} functions
+get_name_pairs(true, VersionSupport) ->
+    [{{Post, Arity}, Orig} || {Orig, Arity, _Pre, Post} <- VersionSupport];
+get_name_pairs(false, VersionSupport) ->
+    [{{Pre, Arity}, Orig} || {Orig, Arity, Pre, _Post} <- VersionSupport].
+
+delete_abstract_functions(ToDelete) ->
+    fun(Tree, Function) ->
+            case lists:member(Function, ToDelete) of
+                true ->
+                    erl_syntax:comment(["Deleted unused function"]);
+                false ->
+                    Tree
+            end
+    end.
+
+rename_abstract_functions(ToRename, ToName) ->
+    fun(Tree, Function) ->
+            case lists:member(Function, ToRename) of
+                true ->
+                    FunctionName = proplists:get_value(Function, ToName),
+                    erl_syntax:function(
+                      erl_syntax:atom(FunctionName),
+                      erl_syntax:function_clauses(Tree));
+                false ->
+                    Tree
+            end
+    end.
+
+replace_forms(Module, ErlangVersion, AbsCode) ->
+    %% Obtain attribute containing the list of functions that must be updated
+    Attr = Module:module_info(attributes),
+    VersionSupport = proplists:get_value(erlang_version_support, Attr),
+    {Pre, Post} = lists:splitwith(fun({Version, _Pairs}) ->
+                                          Version > ErlangVersion
+                                  end, VersionSupport),
+    %% Replace functions in two passes: replace for Erlang versions > current
+    %% first, Erlang versions =< current afterwards.
+    replace_version_forms(
+      true, replace_version_forms(false, AbsCode, get_version_functions(Pre)),
+      get_version_functions(Post)).
+
+get_version_functions(List) ->
+    lists:append([Pairs || {_Version, Pairs} <- List]).
+
+replace_version_forms(IsPost, AbsCode, VersionSupport) ->
+    %% Get pairs of {Function, Arity} for the triggering functions, which
+    %% are also the final function names.
+    Original = get_original_pairs(VersionSupport),
+    %% Get pairs of {Function, Arity} for the unused version
+    ToDelete = get_delete_pairs(IsPost, VersionSupport),
+    %% Delete original functions (those that trigger the code update) and
+    %% the unused version ones
+    DeleteFun = delete_abstract_functions(ToDelete ++ Original),
+    AbsCode0 = replace_function_forms(AbsCode, DeleteFun),
+    %% Get pairs of {Function, Arity} for the current version which must be
+    %% renamed
+    ToRename = get_rename_pairs(IsPost, VersionSupport),
+    %% Get pairs of {Renamed, OriginalName} functions
+    ToName = get_name_pairs(IsPost, VersionSupport),
+    %% Rename versioned functions with their final name
+    RenameFun = rename_abstract_functions(ToRename, ToName),
+    %% Remove exports of all versioned functions
+    remove_exports(replace_function_forms(AbsCode0, RenameFun),
+                   ToDelete ++ ToRename).
+
+replace_function_forms(AbsCode, Fun) ->
+    ReplaceFunction =
+        fun(Tree) ->
+                Function = erl_syntax_lib:analyze_function(Tree),
+                Fun(Tree, Function)
+        end,
+    Filter = fun(Tree) ->
+                     case erl_syntax:type(Tree) of
+                         function -> ReplaceFunction(Tree);
+                         _Other -> Tree
+                     end
+             end,
+    fold_syntax_tree(Filter, AbsCode).
+
+filter_export_pairs(Info, ToDelete) ->
+    lists:filter(fun(Pair) ->
+                         not lists:member(Pair, ToDelete)
+                 end, Info).
+
+remove_exports(AbsCode, ToDelete) ->
+    RemoveExports =
+        fun(Tree) ->
+                case erl_syntax_lib:analyze_attribute(Tree) of
+                    {export, Info} ->
+                        Remaining = filter_export_pairs(Info, ToDelete),
+                        rebuild_export(Remaining);
+                    _Other -> Tree
+                end
+        end,
+    Filter = fun(Tree) ->
+                     case erl_syntax:type(Tree) of
+                         attribute -> RemoveExports(Tree);
+                         _Other -> Tree
+                     end
+             end,
+    fold_syntax_tree(Filter, AbsCode).
+
+rebuild_export(Args) ->
+    erl_syntax:attribute(
+      erl_syntax:atom(export),
+      [erl_syntax:list(
+         [erl_syntax:arity_qualifier(erl_syntax:atom(N),
+                                     erl_syntax:integer(A))
+          || {N, A} <- Args])]).
+
+fold_syntax_tree(Filter, Forms) ->
+    Tree = erl_syntax:form_list(Forms),
+    NewTree = erl_syntax_lib:map(Filter, Tree),
+    erl_syntax:revert_forms(NewTree).
index 029c55fbc9ae86aa9e31819e5eb857d498c93631..0fe3e66664a0e5ef2043e73f054173468575120e 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([bump_msg/0]).
 
 -opaque(bump_msg() :: {pid(), non_neg_integer()}).
 -type(credit_spec() :: {non_neg_integer(), non_neg_integer()}).
 
--spec(send/1 :: (pid()) -> 'ok').
--spec(send/2 :: (pid(), credit_spec()) -> 'ok').
--spec(ack/1 :: (pid()) -> 'ok').
--spec(ack/2 :: (pid(), credit_spec()) -> 'ok').
--spec(handle_bump_msg/1 :: (bump_msg()) -> 'ok').
--spec(blocked/0 :: () -> boolean()).
--spec(peer_down/1 :: (pid()) -> 'ok').
-
--endif.
+-spec send
+        (pid()) -> 'ok';
+        (credit_spec()) -> 'ok'.
+-spec ack(pid()) -> 'ok'.
+-spec ack(pid(), credit_spec()) -> 'ok'.
+-spec handle_bump_msg(bump_msg()) -> 'ok'.
+-spec blocked() -> boolean().
+-spec peer_down(pid()) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index 23494399d297936f2cf30d68abe313a4f1c313c3..e2265f7d04a5e7f87e511564290625096730d078 100644 (file)
 -record(gs2_state, {parent, name, state, mod, time,
                     timeout_state, queue, debug, prioritisers}).
 
--ifdef(use_specs).
-
 %%%=========================================================================
 %%%  Specs. These exist only to shut up dialyzer's warnings
 %%%=========================================================================
 
--type(gs2_state() :: #gs2_state{}).
+-type gs2_state() :: #gs2_state{}.
 
--spec(handle_common_termination/3 ::
-        (any(), atom(), gs2_state()) -> no_return()).
--spec(hibernate/1 :: (gs2_state()) -> no_return()).
--spec(pre_hibernate/1 :: (gs2_state()) -> no_return()).
--spec(system_terminate/4 :: (_, _, _, gs2_state()) -> no_return()).
+-spec handle_common_termination(any(), atom(), gs2_state()) -> no_return().
+-spec hibernate(gs2_state()) -> no_return().
+-spec pre_hibernate(gs2_state()) -> no_return().
+-spec system_terminate(_, _, _, gs2_state()) -> no_return().
 
--type(millis() :: non_neg_integer()).
+-type millis() :: non_neg_integer().
 
 %%%=========================================================================
 %%%  API
 %% for handle_pre_hibernate/1 and handle_post_hibernate/1 will result
 %% in warnings (the same applied for the behaviour_info before).
 
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{init,1},{handle_call,3},{handle_cast,2},{handle_info,2},
-     {terminate,2},{code_change,3}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
-
 %%%  -----------------------------------------------------------------
 %%% Starts a generic server.
 %%% start(Mod, Args, Options)
@@ -625,9 +610,7 @@ extend_backoff(undefined) ->
     undefined;
 extend_backoff({backoff, InitialTimeout, MinimumTimeout, DesiredHibPeriod}) ->
     {backoff, InitialTimeout, MinimumTimeout, DesiredHibPeriod,
-      {erlang:phash2([node()]),
-       time_compat:monotonic_time(),
-       time_compat:unique_integer()}}.
+      rand_compat:seed(exsplus)}.
 
 %%%========================================================================
 %%% Internal functions
@@ -767,7 +750,7 @@ adjust_timeout_state(SleptAt, AwokeAt, {backoff, CurrentTO, MinimumTO,
             true -> lists:max([MinimumTO, CurrentTO div 2]);
             false -> CurrentTO
         end,
-    {Extra, RandomState1} = random:uniform_s(Base, RandomState),
+    {Extra, RandomState1} = rand_compat:uniform_s(Base, RandomState),
     CurrentTO1 = Base + Extra,
     {backoff, CurrentTO1, MinimumTO, DesiredHibPeriod, RandomState1}.
 
index 8df90391a09a1b37c78e55b3167158c9a5bfbaa0..991e3541076a6d637c4ee813a0ce03e13d931d47 100644 (file)
                 delegate,
                 group,
                 tx_fun,
-                initial_childspecs}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
+                initial_childspecs,
+                child_order}).
 
 %%--------------------------------------------------------------------------
 %% Callback behaviour
 -spec create_tables() -> Result when
       Result :: 'ok'.
 
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) -> [{init,1}];
-behaviour_info(_Other)    -> undefined.
-
--endif.
-
 %%----------------------------------------------------------------------------
 
 start_link(Group, TxFun, Mod, Args) ->
@@ -288,7 +276,8 @@ start_internal(Group, TxFun, ChildSpecs) ->
 init({Group, TxFun, ChildSpecs}) ->
     {ok, #state{group              = Group,
                 tx_fun             = TxFun,
-                initial_childspecs = ChildSpecs}}.
+                initial_childspecs = ChildSpecs,
+                child_order = child_order_from(ChildSpecs)}}.
 
 handle_call({init, Overall}, _From,
             State = #state{overall            = undefined,
@@ -371,13 +360,16 @@ handle_info({'DOWN', _Ref, process, Pid, _Reason},
             State = #state{delegate = Delegate,
                            group    = Group,
                            tx_fun   = TxFun,
-                           overall  = O}) ->
+                           overall  = O,
+                           child_order = ChildOrder}) ->
     %% TODO load balance this
     %% No guarantee pg2 will have received the DOWN before us.
     R = case lists:sort(?PG2:get_members(Group)) -- [Pid] of
             [O | _] -> ChildSpecs =
                            TxFun(fun() -> update_all(O, Pid) end),
-                       [start(Delegate, ChildSpec) || ChildSpec <- ChildSpecs];
+                       [start(Delegate, ChildSpec)
+                        || ChildSpec <- restore_child_order(ChildSpecs,
+                           ChildOrder)];
             _       -> []
         end,
     case errors(R) of
@@ -515,3 +507,14 @@ add_proplists([{K1, _} = KV | P1], [{K2, _} | _] = P2, Acc) when K1 < K2 ->
     add_proplists(P1, P2, [KV | Acc]);
 add_proplists(P1, [KV | P2], Acc) ->
     add_proplists(P1, P2, [KV | Acc]).
+
+child_order_from(ChildSpecs) ->
+    lists:zipwith(fun(C, N) ->
+                          {id(C), N}
+                  end, ChildSpecs, lists:seq(1, length(ChildSpecs))).
+
+restore_child_order(ChildSpecs, ChildOrder) ->
+    lists:sort(fun(A, B) ->
+                       proplists:get_value(id(A), ChildOrder)
+                           < proplists:get_value(id(B), ChildOrder)
+               end, ChildSpecs).
index cdfdc1c0c0b77993783bdb66dac63a803f0ece04..5263333f56bc5ea5ee8c23a92c9d5bed934eb92c 100644 (file)
 
 -record(state, {dict, module}).
 
--ifdef(use_specs).
-
 %%----------------------------------------------------------------------------
 
 -export_type([?MODULE/0]).
 
--opaque(?MODULE() :: #state{dict   :: dict:dict(),
+-include("include/old_builtin_types.hrl").
+
+-opaque(?MODULE() :: #state{dict   :: ?DICT_TYPE(),
                             module :: atom()}).
 
 -type(item()         :: pid() | {atom(), node()}).
 
--spec(new/0          :: () -> ?MODULE()).
--spec(new/1          :: ('erlang' | 'delegate') -> ?MODULE()).
--spec(monitor/2      :: (item(), ?MODULE()) -> ?MODULE()).
--spec(monitor_all/2  :: ([item()], ?MODULE()) -> ?MODULE()).
--spec(demonitor/2    :: (item(), ?MODULE()) -> ?MODULE()).
--spec(is_monitored/2 :: (item(), ?MODULE()) -> boolean()).
--spec(erase/2        :: (item(), ?MODULE()) -> ?MODULE()).
--spec(monitored/1    :: (?MODULE()) -> [item()]).
--spec(is_empty/1     :: (?MODULE()) -> boolean()).
-
--endif.
+-spec new() -> ?MODULE().
+-spec new('erlang' | 'delegate') -> ?MODULE().
+-spec monitor(item(), ?MODULE()) -> ?MODULE().
+-spec monitor_all([item()], ?MODULE()) -> ?MODULE().
+-spec demonitor(item(), ?MODULE()) -> ?MODULE().
+-spec is_monitored(item(), ?MODULE()) -> boolean().
+-spec erase(item(), ?MODULE()) -> ?MODULE().
+-spec monitored(?MODULE()) -> [item()].
+-spec is_empty(?MODULE()) -> boolean().
 
 new() -> new(erlang).
 
index 0eab49b3db13bed322383f9af9f6982b0e709e41..81969477c34661cea2a0b7caf9f9a06edc34cc85 100644 (file)
@@ -45,8 +45,6 @@
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([q/0]).
 
 -type(q() :: pqueue()).
 -type(squeue() :: {queue, [any()], [any()], non_neg_integer()}).
 -type(pqueue() ::  squeue() | {pqueue, [{priority(), squeue()}]}).
 
--spec(new/0 :: () -> pqueue()).
--spec(is_queue/1 :: (any()) -> boolean()).
--spec(is_empty/1 :: (pqueue()) -> boolean()).
--spec(len/1 :: (pqueue()) -> non_neg_integer()).
--spec(to_list/1 :: (pqueue()) -> [{priority(), any()}]).
--spec(from_list/1 :: ([{priority(), any()}]) -> pqueue()).
--spec(in/2 :: (any(), pqueue()) -> pqueue()).
--spec(in/3 :: (any(), priority(), pqueue()) -> pqueue()).
--spec(out/1 :: (pqueue()) -> {empty | {value, any()}, pqueue()}).
--spec(out_p/1 :: (pqueue()) -> {empty | {value, any(), priority()}, pqueue()}).
--spec(join/2 :: (pqueue(), pqueue()) -> pqueue()).
--spec(filter/2 :: (fun ((any()) -> boolean()), pqueue()) -> pqueue()).
--spec(fold/3 ::
-        (fun ((any(), priority(), A) -> A), A, pqueue()) -> A).
--spec(highest/1 :: (pqueue()) -> priority() | 'empty').
-
--endif.
+-spec new() -> pqueue().
+-spec is_queue(any()) -> boolean().
+-spec is_empty(pqueue()) -> boolean().
+-spec len(pqueue()) -> non_neg_integer().
+-spec to_list(pqueue()) -> [{priority(), any()}].
+-spec from_list([{priority(), any()}]) -> pqueue().
+-spec in(any(), pqueue()) -> pqueue().
+-spec in(any(), priority(), pqueue()) -> pqueue().
+-spec out(pqueue()) -> {empty | {value, any()}, pqueue()}.
+-spec out_p(pqueue()) -> {empty | {value, any(), priority()}, pqueue()}.
+-spec join(pqueue(), pqueue()) -> pqueue().
+-spec filter(fun ((any()) -> boolean()), pqueue()) -> pqueue().
+-spec fold
+        (fun ((any(), priority(), A) -> A), A, pqueue()) -> A.
+-spec highest(pqueue()) -> priority() | 'empty'.
 
 %%----------------------------------------------------------------------------
 
index c4975b5c8d16e725d49162aee50c284c77598f35..27b352ad4eb51b79107bccbfc31d05cfaa9fba4f 100644 (file)
@@ -25,7 +25,7 @@
          check_exclusive_access/2, with_exclusive_access_or_die/3,
          stat/1, deliver/2, requeue/3, ack/3, reject/4]).
 -export([list/0, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2,
-         info_all/4]).
+         info_all/6, info_local/1]).
 -export([list_down/1]).
 -export([force_event_refresh/1, notify_policy_changed/1]).
 -export([consumers/1, consumers_all/1,  consumers_all/3, consumer_info_keys/0]).
@@ -37,6 +37,8 @@
 -export([start_mirroring/1, stop_mirroring/1, sync_mirrors/1,
          cancel_sync_mirrors/1]).
 
+-export([pid_of/1, pid_of/2]).
+
 %% internal
 -export([internal_declare/2, internal_delete/1, run_backing_queue/3,
          set_ram_duration_target/2, set_maximum_since_use/2]).
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([name/0, qmsg/0, absent_reason/0]).
 
--type(name() :: rabbit_types:r('queue')).
--type(qpids() :: [pid()]).
--type(qlen() :: rabbit_types:ok(non_neg_integer())).
--type(qfun(A) :: fun ((rabbit_types:amqqueue()) -> A | no_return())).
--type(qmsg() :: {name(), pid(), msg_id(), boolean(), rabbit_types:message()}).
--type(msg_id() :: non_neg_integer()).
--type(ok_or_errors() ::
-        'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}).
--type(absent_reason() :: 'nodedown' | 'crashed').
--type(queue_or_absent() :: rabbit_types:amqqueue() |
-                           {'absent', rabbit_types:amqqueue(),absent_reason()}).
--type(not_found_or_absent() ::
-        'not_found' | {'absent', rabbit_types:amqqueue(), absent_reason()}).
--spec(recover/0 :: () -> [rabbit_types:amqqueue()]).
--spec(stop/0 :: () -> 'ok').
--spec(start/1 :: ([rabbit_types:amqqueue()]) -> 'ok').
--spec(declare/5 ::
-        (name(), boolean(), boolean(),
-         rabbit_framing:amqp_table(), rabbit_types:maybe(pid()))
-        -> {'new' | 'existing' | 'absent' | 'owner_died',
-            rabbit_types:amqqueue()} | rabbit_types:channel_exit()).
--spec(declare/6 ::
-        (name(), boolean(), boolean(),
-         rabbit_framing:amqp_table(), rabbit_types:maybe(pid()), node())
-        -> {'new' | 'existing' | 'owner_died', rabbit_types:amqqueue()} |
-           {'absent', rabbit_types:amqqueue(), absent_reason()} |
-           rabbit_types:channel_exit()).
--spec(internal_declare/2 ::
-        (rabbit_types:amqqueue(), boolean())
-        -> queue_or_absent() | rabbit_misc:thunk(queue_or_absent())).
--spec(update/2 ::
-        (name(),
-         fun((rabbit_types:amqqueue()) -> rabbit_types:amqqueue()))
-         -> 'not_found' | rabbit_types:amqqueue()).
--spec(lookup/1 ::
-        (name()) -> rabbit_types:ok(rabbit_types:amqqueue()) |
-                    rabbit_types:error('not_found');
-        ([name()]) -> [rabbit_types:amqqueue()]).
--spec(not_found_or_absent/1 :: (name()) -> not_found_or_absent()).
--spec(with/2 :: (name(), qfun(A)) ->
-                     A | rabbit_types:error(not_found_or_absent())).
--spec(with/3 :: (name(), qfun(A), fun((not_found_or_absent()) -> B)) -> A | B).
--spec(with_or_die/2 ::
-        (name(), qfun(A)) -> A | rabbit_types:channel_exit()).
--spec(assert_equivalence/5 ::
+-type name() :: rabbit_types:r('queue').
+-type qpids() :: [pid()].
+-type qlen() :: rabbit_types:ok(non_neg_integer()).
+-type qfun(A) :: fun ((rabbit_types:amqqueue()) -> A | no_return()).
+-type qmsg() :: {name(), pid(), msg_id(), boolean(), rabbit_types:message()}.
+-type msg_id() :: non_neg_integer().
+-type ok_or_errors() ::
+        'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}.
+-type absent_reason() :: 'nodedown' | 'crashed'.
+-type queue_or_absent() :: rabbit_types:amqqueue() |
+                           {'absent', rabbit_types:amqqueue(),absent_reason()}.
+-type not_found_or_absent() ::
+        'not_found' | {'absent', rabbit_types:amqqueue(), absent_reason()}.
+-spec recover() -> [rabbit_types:amqqueue()].
+-spec stop() -> 'ok'.
+-spec start([rabbit_types:amqqueue()]) -> 'ok'.
+-spec declare
+        (name(), boolean(), boolean(), rabbit_framing:amqp_table(),
+         rabbit_types:maybe(pid())) ->
+            {'new' | 'existing' | 'absent' | 'owner_died',
+             rabbit_types:amqqueue()} |
+            rabbit_types:channel_exit().
+-spec declare
+        (name(), boolean(), boolean(), rabbit_framing:amqp_table(),
+         rabbit_types:maybe(pid()), node()) ->
+            {'new' | 'existing' | 'owner_died', rabbit_types:amqqueue()} |
+            {'absent', rabbit_types:amqqueue(), absent_reason()} |
+            rabbit_types:channel_exit().
+-spec internal_declare(rabbit_types:amqqueue(), boolean()) ->
+          queue_or_absent() | rabbit_misc:thunk(queue_or_absent()).
+-spec update
+        (name(), fun((rabbit_types:amqqueue()) -> rabbit_types:amqqueue())) ->
+            'not_found' | rabbit_types:amqqueue().
+-spec lookup
+        (name()) ->
+            rabbit_types:ok(rabbit_types:amqqueue()) |
+            rabbit_types:error('not_found');
+        ([name()]) ->
+            [rabbit_types:amqqueue()].
+-spec not_found_or_absent(name()) -> not_found_or_absent().
+-spec with(name(), qfun(A)) ->
+          A | rabbit_types:error(not_found_or_absent()).
+-spec with(name(), qfun(A), fun((not_found_or_absent()) -> B)) -> A | B.
+-spec with_or_die(name(), qfun(A)) -> A | rabbit_types:channel_exit().
+-spec assert_equivalence
         (rabbit_types:amqqueue(), boolean(), boolean(),
-         rabbit_framing:amqp_table(), rabbit_types:maybe(pid()))
-        -> 'ok' | rabbit_types:channel_exit() |
-           rabbit_types:connection_exit()).
--spec(check_exclusive_access/2 ::
-        (rabbit_types:amqqueue(), pid())
-        -> 'ok' | rabbit_types:channel_exit()).
--spec(with_exclusive_access_or_die/3 ::
-        (name(), pid(), qfun(A)) -> A | rabbit_types:channel_exit()).
--spec(list/0 :: () -> [rabbit_types:amqqueue()]).
--spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:amqqueue()]).
--spec(list_down/1 :: (rabbit_types:vhost()) -> [rabbit_types:amqqueue()]).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (rabbit_types:amqqueue()) -> rabbit_types:infos()).
--spec(info/2 ::
-        (rabbit_types:amqqueue(), rabbit_types:info_keys())
-        -> rabbit_types:infos()).
--spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(info_all/2 :: (rabbit_types:vhost(), rabbit_types:info_keys())
-                    -> [rabbit_types:infos()]).
--spec(info_all/4 :: (rabbit_types:vhost(), rabbit_types:info_keys(),
-                     reference(), pid()) -> 'ok').
--spec(force_event_refresh/1 :: (reference()) -> 'ok').
--spec(notify_policy_changed/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(consumers/1 :: (rabbit_types:amqqueue())
-                     -> [{pid(), rabbit_types:ctag(), boolean(),
-                          non_neg_integer(), rabbit_framing:amqp_table()}]).
--spec(consumer_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(consumers_all/1 ::
-        (rabbit_types:vhost())
-        -> [{name(), pid(), rabbit_types:ctag(), boolean(),
-             non_neg_integer(), rabbit_framing:amqp_table()}]).
--spec(consumers_all/3 ::
-        (rabbit_types:vhost(), reference(), pid())
-        -> 'ok').
--spec(stat/1 ::
-        (rabbit_types:amqqueue())
-        -> {'ok', non_neg_integer(), non_neg_integer()}).
--spec(delete_immediately/1 :: (qpids()) -> 'ok').
--spec(delete/3 ::
-        (rabbit_types:amqqueue(), 'false', 'false')
-        -> qlen();
-        (rabbit_types:amqqueue(), 'true' , 'false')
-        -> qlen() | rabbit_types:error('in_use');
-        (rabbit_types:amqqueue(), 'false', 'true' )
-        -> qlen() | rabbit_types:error('not_empty');
-        (rabbit_types:amqqueue(), 'true' , 'true' )
-        -> qlen() |
-           rabbit_types:error('in_use') |
-           rabbit_types:error('not_empty')).
--spec(delete_crashed/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(delete_crashed_internal/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(purge/1 :: (rabbit_types:amqqueue()) -> qlen()).
--spec(forget_all_durable/1 :: (node()) -> 'ok').
--spec(deliver/2 :: ([rabbit_types:amqqueue()], rabbit_types:delivery()) ->
-                        qpids()).
--spec(requeue/3 :: (pid(), [msg_id()],  pid()) -> 'ok').
--spec(ack/3 :: (pid(), [msg_id()], pid()) -> 'ok').
--spec(reject/4 :: (pid(), [msg_id()], boolean(), pid()) -> 'ok').
--spec(notify_down_all/2 :: (qpids(), pid()) -> ok_or_errors()).
--spec(notify_down_all/3 :: (qpids(), pid(), non_neg_integer())
-                           -> ok_or_errors()).
--spec(activate_limit_all/2 :: (qpids(), pid()) -> ok_or_errors()).
--spec(basic_get/4 :: (rabbit_types:amqqueue(), pid(), boolean(), pid()) ->
-                          {'ok', non_neg_integer(), qmsg()} | 'empty').
--spec(credit/5 :: (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(),
-                   non_neg_integer(), boolean()) -> 'ok').
--spec(basic_consume/10 ::
+         rabbit_framing:amqp_table(), rabbit_types:maybe(pid())) ->
+            'ok' | rabbit_types:channel_exit() | rabbit_types:connection_exit().
+-spec check_exclusive_access(rabbit_types:amqqueue(), pid()) ->
+          'ok' | rabbit_types:channel_exit().
+-spec with_exclusive_access_or_die(name(), pid(), qfun(A)) ->
+          A | rabbit_types:channel_exit().
+-spec list() -> [rabbit_types:amqqueue()].
+-spec list(rabbit_types:vhost()) -> [rabbit_types:amqqueue()].
+-spec list_down(rabbit_types:vhost()) -> [rabbit_types:amqqueue()].
+-spec info_keys() -> rabbit_types:info_keys().
+-spec info(rabbit_types:amqqueue()) -> rabbit_types:infos().
+-spec info(rabbit_types:amqqueue(), rabbit_types:info_keys()) ->
+          rabbit_types:infos().
+-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys()) ->
+          [rabbit_types:infos()].
+-spec info_all
+        (rabbit_types:vhost(), rabbit_types:info_keys(), boolean(), boolean(),
+         reference(), pid()) ->
+            'ok'.
+-spec force_event_refresh(reference()) -> 'ok'.
+-spec notify_policy_changed(rabbit_types:amqqueue()) -> 'ok'.
+-spec consumers(rabbit_types:amqqueue()) ->
+          [{pid(), rabbit_types:ctag(), boolean(), non_neg_integer(),
+            rabbit_framing:amqp_table()}].
+-spec consumer_info_keys() -> rabbit_types:info_keys().
+-spec consumers_all(rabbit_types:vhost()) ->
+          [{name(), pid(), rabbit_types:ctag(), boolean(),
+            non_neg_integer(), rabbit_framing:amqp_table()}].
+-spec consumers_all(rabbit_types:vhost(), reference(), pid()) -> 'ok'.
+-spec stat(rabbit_types:amqqueue()) ->
+          {'ok', non_neg_integer(), non_neg_integer()}.
+-spec delete_immediately(qpids()) -> 'ok'.
+-spec delete
+        (rabbit_types:amqqueue(), 'false', 'false') ->
+            qlen();
+        (rabbit_types:amqqueue(), 'true' , 'false') ->
+            qlen() | rabbit_types:error('in_use');
+        (rabbit_types:amqqueue(), 'false', 'true' ) ->
+            qlen() | rabbit_types:error('not_empty');
+        (rabbit_types:amqqueue(), 'true' , 'true' ) ->
+            qlen() |
+            rabbit_types:error('in_use') |
+            rabbit_types:error('not_empty').
+-spec delete_crashed(rabbit_types:amqqueue()) -> 'ok'.
+-spec delete_crashed_internal(rabbit_types:amqqueue()) -> 'ok'.
+-spec purge(rabbit_types:amqqueue()) -> qlen().
+-spec forget_all_durable(node()) -> 'ok'.
+-spec deliver([rabbit_types:amqqueue()], rabbit_types:delivery()) ->
+                        qpids().
+-spec requeue(pid(), [msg_id()],  pid()) -> 'ok'.
+-spec ack(pid(), [msg_id()], pid()) -> 'ok'.
+-spec reject(pid(), [msg_id()], boolean(), pid()) -> 'ok'.
+-spec notify_down_all(qpids(), pid()) -> ok_or_errors().
+-spec notify_down_all(qpids(), pid(), non_neg_integer()) ->
+          ok_or_errors().
+-spec activate_limit_all(qpids(), pid()) -> ok_or_errors().
+-spec basic_get(rabbit_types:amqqueue(), pid(), boolean(), pid()) ->
+          {'ok', non_neg_integer(), qmsg()} | 'empty'.
+-spec credit
+        (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(), non_neg_integer(),
+         boolean()) ->
+            'ok'.
+-spec basic_consume
         (rabbit_types:amqqueue(), boolean(), pid(), pid(), boolean(),
          non_neg_integer(), rabbit_types:ctag(), boolean(),
-         rabbit_framing:amqp_table(), any())
-        -> rabbit_types:ok_or_error('exclusive_consume_unavailable')).
--spec(basic_cancel/4 ::
-        (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(), any()) -> 'ok').
--spec(notify_decorators/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(notify_sent/2 :: (pid(), pid()) -> 'ok').
--spec(notify_sent_queue_down/1 :: (pid()) -> 'ok').
--spec(resume/2 :: (pid(), pid()) -> 'ok').
--spec(internal_delete/1 ::
-        (name()) -> rabbit_types:ok_or_error('not_found') |
-                    rabbit_types:connection_exit() |
-                    fun (() -> rabbit_types:ok_or_error('not_found') |
-                               rabbit_types:connection_exit())).
--spec(run_backing_queue/3 ::
-        (pid(), atom(),
-         (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok').
--spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok').
--spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
--spec(on_node_up/1 :: (node()) -> 'ok').
--spec(on_node_down/1 :: (node()) -> 'ok').
--spec(pseudo_queue/2 :: (name(), pid()) -> rabbit_types:amqqueue()).
--spec(immutable/1 :: (rabbit_types:amqqueue()) -> rabbit_types:amqqueue()).
--spec(store_queue/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(update_decorators/1 :: (name()) -> 'ok').
--spec(policy_changed/2 ::
-        (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok').
--spec(start_mirroring/1 :: (pid()) -> 'ok').
--spec(stop_mirroring/1 :: (pid()) -> 'ok').
--spec(sync_mirrors/1 :: (pid()) -> 'ok' | rabbit_types:error('not_mirrored')).
--spec(cancel_sync_mirrors/1 :: (pid()) -> 'ok' | {'ok', 'not_syncing'}).
-
--endif.
+         rabbit_framing:amqp_table(), any()) ->
+            rabbit_types:ok_or_error('exclusive_consume_unavailable').
+-spec basic_cancel
+        (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(), any()) -> 'ok'.
+-spec notify_decorators(rabbit_types:amqqueue()) -> 'ok'.
+-spec notify_sent(pid(), pid()) -> 'ok'.
+-spec notify_sent_queue_down(pid()) -> 'ok'.
+-spec resume(pid(), pid()) -> 'ok'.
+-spec internal_delete(name()) ->
+          rabbit_types:ok_or_error('not_found') |
+          rabbit_types:connection_exit() |
+          fun (() ->
+              rabbit_types:ok_or_error('not_found') |
+              rabbit_types:connection_exit()).
+-spec run_backing_queue
+        (pid(), atom(), (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) ->
+            'ok'.
+-spec set_ram_duration_target(pid(), number() | 'infinity') -> 'ok'.
+-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'.
+-spec on_node_up(node()) -> 'ok'.
+-spec on_node_down(node()) -> 'ok'.
+-spec pseudo_queue(name(), pid()) -> rabbit_types:amqqueue().
+-spec immutable(rabbit_types:amqqueue()) -> rabbit_types:amqqueue().
+-spec store_queue(rabbit_types:amqqueue()) -> 'ok'.
+-spec update_decorators(name()) -> 'ok'.
+-spec policy_changed(rabbit_types:amqqueue(), rabbit_types:amqqueue()) ->
+          'ok'.
+-spec start_mirroring(pid()) -> 'ok'.
+-spec stop_mirroring(pid()) -> 'ok'.
+-spec sync_mirrors(rabbit_types:amqqueue() | pid()) ->
+          'ok' | rabbit_types:error('not_mirrored').
+-spec cancel_sync_mirrors(rabbit_types:amqqueue() | pid()) ->
+          'ok' | {'ok', 'not_syncing'}.
+
+-spec pid_of(rabbit_types:amqqueue()) ->
+          {'ok', pid()} | rabbit_types:error('not_found').
+-spec pid_of(rabbit_types:vhost(), rabbit_misc:resource_name()) ->
+          {'ok', pid()} | rabbit_types:error('not_found').
 
 %%----------------------------------------------------------------------------
 
@@ -624,13 +627,24 @@ info_all(VHostPath, Items) ->
     map(list(VHostPath), fun (Q) -> info(Q, Items) end) ++
         map(list_down(VHostPath), fun (Q) -> info_down(Q, Items, down) end).
 
-info_all(VHostPath, Items, Ref, AggregatorPid) ->
-    rabbit_control_misc:emitting_map_with_exit_handler(
-      AggregatorPid, Ref, fun(Q) -> info(Q, Items) end, list(VHostPath),
-      continue),
-    rabbit_control_misc:emitting_map_with_exit_handler(
-      AggregatorPid, Ref, fun(Q) -> info_down(Q, Items) end,
-      list_down(VHostPath)).
+info_all(VHostPath, Items, NeedOnline, NeedOffline, Ref, AggregatorPid) ->
+    NeedOnline andalso rabbit_control_misc:emitting_map_with_exit_handler(
+                         AggregatorPid, Ref, fun(Q) -> info(Q, Items) end, list(VHostPath),
+                         continue),
+    NeedOffline andalso rabbit_control_misc:emitting_map_with_exit_handler(
+                          AggregatorPid, Ref, fun(Q) -> info_down(Q, Items, down) end,
+                          list_down(VHostPath),
+                          continue),
+    %% Previous maps are incomplete, finalize emission
+    rabbit_control_misc:emitting_map(AggregatorPid, Ref, fun(_) -> no_op end, []).
+
+info_local(VHostPath) ->
+    map(list_local(VHostPath), fun (Q) -> info(Q, [name]) end).
+
+list_local(VHostPath) ->
+    [ Q || #amqqueue{state = State, pid=QPid} = Q <- list(VHostPath),
+           State =/= crashed,
+           node() =:= node(QPid) ].
 
 force_event_refresh(Ref) ->
     [gen_server2:cast(Q#amqqueue.pid,
@@ -658,14 +672,20 @@ consumers_all(VHostPath, Ref, AggregatorPid) ->
       list(VHostPath)).
 
 get_queue_consumer_info(Q, ConsumerInfoKeys) ->
-    lists:flatten(
-      [lists:zip(ConsumerInfoKeys,
-                 [Q#amqqueue.name, ChPid, CTag,
-                  AckRequired, Prefetch, Args]) ||
-          {ChPid, CTag, AckRequired, Prefetch, Args} <- consumers(Q)]).
+    [lists:zip(ConsumerInfoKeys,
+               [Q#amqqueue.name, ChPid, CTag,
+                AckRequired, Prefetch, Args]) ||
+        {ChPid, CTag, AckRequired, Prefetch, Args} <- consumers(Q)].
 
 stat(#amqqueue{pid = QPid}) -> delegate:call(QPid, stat).
 
+pid_of(#amqqueue{pid = Pid}) -> Pid.
+pid_of(VHost, QueueName) ->
+  case lookup(rabbit_misc:r(VHost, queue, QueueName)) of
+    {ok, Q}                -> pid_of(Q);
+    {error, not_found} = E -> E
+  end.
+
 delete_immediately(QPids) ->
     [gen_server2:cast(QPid, delete_immediately) || QPid <- QPids],
     ok.
@@ -845,8 +865,10 @@ set_maximum_since_use(QPid, Age) ->
 start_mirroring(QPid) -> ok = delegate:cast(QPid, start_mirroring).
 stop_mirroring(QPid)  -> ok = delegate:cast(QPid, stop_mirroring).
 
-sync_mirrors(QPid)        -> delegate:call(QPid, sync_mirrors).
-cancel_sync_mirrors(QPid) -> delegate:call(QPid, cancel_sync_mirrors).
+sync_mirrors(#amqqueue{pid = QPid}) -> delegate:call(QPid, sync_mirrors);
+sync_mirrors(QPid)                  -> delegate:call(QPid, sync_mirrors).
+cancel_sync_mirrors(#amqqueue{pid = QPid}) -> delegate:call(QPid, cancel_sync_mirrors);
+cancel_sync_mirrors(QPid)                  -> delegate:call(QPid, cancel_sync_mirrors).
 
 on_node_up(Node) ->
     ok = rabbit_misc:execute_mnesia_transaction(
similarity index 95%
rename from rabbitmq-server/src/rabbit_auth_backend_dummy.erl
rename to rabbitmq-server/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl
index 0077b4c99372c57593059d7f2ed7862a01d24385..8ac19299f9c09a041f8d85b6f0d93d03335a8d66 100644 (file)
 -export([user_login_authentication/2, user_login_authorization/1,
          check_vhost_access/3, check_resource_access/3]).
 
--ifdef(use_specs).
-
--spec(user/0 :: () -> rabbit_types:user()).
-
--endif.
+-spec user() -> rabbit_types:user().
 
 %% A user to be used by the direct client when permission checks are
 %% not needed. This user can do anything AMQPish.
similarity index 85%
rename from rabbitmq-server/src/rabbit_auth_backend_internal.erl
rename to rabbitmq-server/deps/rabbit_common/src/rabbit_auth_backend_internal.erl
index d7705d8e7b701025cfb7778368bea86d501edf7b..fdd954a4e0c4eea2dafdd132a9f201a01b4e1de9 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--type(regexp() :: binary()).
-
--spec(add_user/2 :: (rabbit_types:username(), rabbit_types:password()) -> 'ok').
--spec(delete_user/1 :: (rabbit_types:username()) -> 'ok').
--spec(lookup_user/1 :: (rabbit_types:username())
-                       -> rabbit_types:ok(rabbit_types:internal_user())
-                              | rabbit_types:error('not_found')).
--spec(change_password/2 :: (rabbit_types:username(), rabbit_types:password())
-                           -> 'ok').
--spec(clear_password/1 :: (rabbit_types:username()) -> 'ok').
--spec(hash_password/2 :: (module(), rabbit_types:password())
-                         -> rabbit_types:password_hash()).
--spec(change_password_hash/2 :: (rabbit_types:username(),
-                                 rabbit_types:password_hash()) -> 'ok').
--spec(set_tags/2 :: (rabbit_types:username(), [atom()]) -> 'ok').
--spec(set_permissions/5 ::(rabbit_types:username(), rabbit_types:vhost(),
-                           regexp(), regexp(), regexp()) -> 'ok').
--spec(clear_permissions/2 :: (rabbit_types:username(), rabbit_types:vhost())
-                             -> 'ok').
--spec(user_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(perms_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(user_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(user_vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(list_users/0 :: () -> [rabbit_types:infos()]).
--spec(list_users/2 :: (reference(), pid()) -> 'ok').
--spec(list_permissions/0 :: () -> [rabbit_types:infos()]).
--spec(list_user_permissions/1 ::
-        (rabbit_types:username()) -> [rabbit_types:infos()]).
--spec(list_user_permissions/3 ::
-        (rabbit_types:username(), reference(), pid()) -> 'ok').
--spec(list_vhost_permissions/1 ::
-        (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(list_vhost_permissions/3 ::
-        (rabbit_types:vhost(), reference(), pid()) -> 'ok').
--spec(list_user_vhost_permissions/2 ::
-        (rabbit_types:username(), rabbit_types:vhost())
-        -> [rabbit_types:infos()]).
-
--endif.
+-type regexp() :: binary().
+
+-spec add_user(rabbit_types:username(), rabbit_types:password()) -> 'ok'.
+-spec delete_user(rabbit_types:username()) -> 'ok'.
+-spec lookup_user
+        (rabbit_types:username()) ->
+            rabbit_types:ok(rabbit_types:internal_user()) |
+            rabbit_types:error('not_found').
+-spec change_password
+        (rabbit_types:username(), rabbit_types:password()) -> 'ok'.
+-spec clear_password(rabbit_types:username()) -> 'ok'.
+-spec hash_password
+        (module(), rabbit_types:password()) -> rabbit_types:password_hash().
+-spec change_password_hash
+        (rabbit_types:username(), rabbit_types:password_hash()) -> 'ok'.
+-spec set_tags(rabbit_types:username(), [atom()]) -> 'ok'.
+-spec set_permissions
+        (rabbit_types:username(), rabbit_types:vhost(), regexp(), regexp(),
+         regexp()) ->
+            'ok'.
+-spec clear_permissions
+        (rabbit_types:username(), rabbit_types:vhost()) -> 'ok'.
+-spec user_info_keys() -> rabbit_types:info_keys().
+-spec perms_info_keys() -> rabbit_types:info_keys().
+-spec user_perms_info_keys() -> rabbit_types:info_keys().
+-spec vhost_perms_info_keys() -> rabbit_types:info_keys().
+-spec user_vhost_perms_info_keys() -> rabbit_types:info_keys().
+-spec list_users() -> [rabbit_types:infos()].
+-spec list_users(reference(), pid()) -> 'ok'.
+-spec list_permissions() -> [rabbit_types:infos()].
+-spec list_user_permissions
+        (rabbit_types:username()) -> [rabbit_types:infos()].
+-spec list_user_permissions
+        (rabbit_types:username(), reference(), pid()) -> 'ok'.
+-spec list_vhost_permissions
+        (rabbit_types:vhost()) -> [rabbit_types:infos()].
+-spec list_vhost_permissions
+        (rabbit_types:vhost(), reference(), pid()) -> 'ok'.
+-spec list_user_vhost_permissions
+        (rabbit_types:username(), rabbit_types:vhost()) -> [rabbit_types:infos()].
 
 %%----------------------------------------------------------------------------
 %% Implementation of rabbit_auth_backend
@@ -96,17 +94,21 @@ hashing_module_for_user(#internal_user{
 
 user_login_authentication(Username, []) ->
     internal_check_user_login(Username, fun(_) -> true end);
-user_login_authentication(Username, [{password, Cleartext}]) ->
-    internal_check_user_login(
-      Username,
-      fun (#internal_user{password_hash = <<Salt:4/binary, Hash/binary>>} = U) ->
-          Hash =:= rabbit_password:salted_hash(
-              hashing_module_for_user(U), Salt, Cleartext);
-          (#internal_user{}) ->
-              false
-      end);
 user_login_authentication(Username, AuthProps) ->
-    exit({unknown_auth_props, Username, AuthProps}).
+    case lists:keyfind(password, 1, AuthProps) of
+        {password, Cleartext} ->
+            internal_check_user_login(
+              Username,
+              fun (#internal_user{
+                        password_hash = <<Salt:4/binary, Hash/binary>>
+                    } = U) ->
+                  Hash =:= rabbit_password:salted_hash(
+                      hashing_module_for_user(U), Salt, Cleartext);
+                  (#internal_user{}) ->
+                      false
+              end);
+        false -> exit({unknown_auth_props, Username, AuthProps})
+    end.
 
 user_login_authorization(Username) ->
     case user_login_authentication(Username, []) of
index c50a429a6fdb098c2187a96ebdd3121a5f08358d..4c41502b065029de3132b33758dea9011476ee99 100644 (file)
@@ -16,8 +16,6 @@
 
 -module(rabbit_auth_mechanism).
 
--ifdef(use_specs).
-
 %% A description.
 -callback description() -> [proplists:property()].
 
     {'challenge', binary(), any()} |
     {'protocol_error', string(), [any()]} |
     {'refused', rabbit_types:username() | none, string(), [any()]}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{description, 0}, {should_offer, 1}, {init, 1}, {handle_response, 2}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
index 47404df87ad16c192570f7131f6613bb5963726e..45f3c46109b7add68930d9afb9abf46ef0bd974b 100644 (file)
@@ -18,8 +18,6 @@
 
 -include("rabbit.hrl").
 
--ifdef(use_specs).
-
 %% Check a user can log in, given a username and a proplist of
 %% authentication information (e.g. [{password, Password}]). If your
 %% backend is not to be used for authentication, this should always
     {'ok', rabbit_types:auth_user()} |
     {'refused', string(), [any()]} |
     {'error', any()}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{user_login_authentication, 2}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
index 331de8f0bd527979fa12f2d3f336505323335c8a..4315aaa9b169b12884a68b554d1f94b3a36e6bee 100644 (file)
@@ -18,8 +18,6 @@
 
 -include("rabbit.hrl").
 
--ifdef(use_specs).
-
 %% Check a user can log in, when this backend is being used for
 %% authorisation only. Authentication has already taken place
 %% successfully, but we need to check that the user exists in this
                                 rabbit_types:r(atom()),
                                 rabbit_access_control:permission_atom()) ->
     boolean() | {'error', any()}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{user_login_authorization, 1},
-     {check_vhost_access, 3}, {check_resource_access, 3}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
index 862dbc9413e535de873279330e3c67bb8e8d8e8f..bb4d03acced4348975addbef38e7173010e59c0b 100644 (file)
                     message_bytes_persistent, head_message_timestamp,
                     disk_reads, disk_writes, backing_queue_status]).
 
--ifdef(use_specs).
-
 %% We can't specify a per-queue ack/state with callback signatures
--type(ack()   :: any()).
--type(state() :: any()).
-
--type(flow() :: 'flow' | 'noflow').
--type(msg_ids() :: [rabbit_types:msg_id()]).
--type(publish() :: {rabbit_types:basic_message(),
-                    rabbit_types:message_properties(), boolean()}).
--type(delivered_publish() :: {rabbit_types:basic_message(),
-                              rabbit_types:message_properties()}).
--type(fetch_result(Ack) ::
-        ('empty' | {rabbit_types:basic_message(), boolean(), Ack})).
--type(drop_result(Ack) ::
-        ('empty' | {rabbit_types:msg_id(), Ack})).
--type(recovery_terms() :: [term()] | 'non_clean_shutdown').
--type(recovery_info() :: 'new' | recovery_terms()).
--type(purged_msg_count() :: non_neg_integer()).
--type(async_callback() ::
-        fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok')).
--type(duration() :: ('undefined' | 'infinity' | number())).
-
--type(msg_fun(A) :: fun ((rabbit_types:basic_message(), ack(), A) -> A)).
--type(msg_pred() :: fun ((rabbit_types:message_properties()) -> boolean())).
-
--type(queue_mode() :: atom()).
-
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
+-type ack()   :: any().
+-type state() :: any().
+
+-type flow() :: 'flow' | 'noflow'.
+-type msg_ids() :: [rabbit_types:msg_id()].
+-type publish() :: {rabbit_types:basic_message(),
+                    rabbit_types:message_properties(), boolean()}.
+-type delivered_publish() :: {rabbit_types:basic_message(),
+                              rabbit_types:message_properties()}.
+-type fetch_result(Ack) ::
+        ('empty' | {rabbit_types:basic_message(), boolean(), Ack}).
+-type drop_result(Ack) ::
+        ('empty' | {rabbit_types:msg_id(), Ack}).
+-type recovery_terms() :: [term()] | 'non_clean_shutdown'.
+-type recovery_info() :: 'new' | recovery_terms().
+-type purged_msg_count() :: non_neg_integer().
+-type async_callback() ::
+        fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok').
+-type duration() :: ('undefined' | 'infinity' | number()).
+
+-type msg_fun(A) :: fun ((rabbit_types:basic_message(), ack(), A) -> A).
+-type msg_pred() :: fun ((rabbit_types:message_properties()) -> boolean()).
+
+-type queue_mode() :: atom().
+
+-spec info_keys() -> rabbit_types:info_keys().
 
 %% Called on startup with a list of durable queue names. The queues
 %% aren't being started at this point, but this call allows the
                             [ack()], Acc, state())
                            -> Acc.
 
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{start, 1}, {stop, 0}, {init, 3}, {terminate, 2},
-     {delete_and_terminate, 2}, {delete_crashed, 1}, {purge, 1},
-     {purge_acks, 1}, {publish, 6}, {publish_delivered, 5},
-     {batch_publish, 4}, {batch_publish_delivered, 4},
-     {discard, 4}, {drain_confirmed, 1},
-     {dropwhile, 2}, {fetchwhile, 4}, {fetch, 2},
-     {drop, 2}, {ack, 2}, {requeue, 2}, {ackfold, 4}, {fold, 3}, {len, 1},
-     {is_empty, 1}, {depth, 1}, {set_ram_duration_target, 2},
-     {ram_duration, 1}, {needs_timeout, 1}, {timeout, 1},
-     {handle_pre_hibernate, 1}, {resume, 1}, {msg_rates, 1},
-     {info, 2}, {invoke, 3}, {is_duplicate, 2}, {set_queue_mode, 2},
-     {zip_msgs_and_acks, 4}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
-
 info_keys() -> ?INFO_KEYS.
index ed71d8ba80a1f4df82d3b5ffcf48f62c8ff7a4f6..14f0a4e855dbc0dc33649ce43b845c5fa57d81cf 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
+-type properties_input() ::
+        rabbit_framing:amqp_property_record() | [{atom(), any()}].
+-type publish_result() ::
+        {ok, [pid()]} | rabbit_types:error('not_found').
+-type header() :: any().
+-type headers() :: rabbit_framing:amqp_table() | 'undefined'.
 
--type(properties_input() ::
-        (rabbit_framing:amqp_property_record() | [{atom(), any()}])).
--type(publish_result() ::
-        ({ok, [pid()]} | rabbit_types:error('not_found'))).
--type(header() :: any()).
--type(headers() :: rabbit_framing:amqp_table() | 'undefined').
+-type exchange_input() :: rabbit_types:exchange() | rabbit_exchange:name().
+-type body_input() :: binary() | [binary()].
 
--type(exchange_input() :: (rabbit_types:exchange() | rabbit_exchange:name())).
--type(body_input() :: (binary() | [binary()])).
-
--spec(publish/4 ::
+-spec publish
         (exchange_input(), rabbit_router:routing_key(), properties_input(),
-         body_input()) -> publish_result()).
--spec(publish/5 ::
+         body_input()) ->
+            publish_result().
+-spec publish
         (exchange_input(), rabbit_router:routing_key(), boolean(),
-         properties_input(), body_input()) -> publish_result()).
--spec(publish/1 ::
-        (rabbit_types:delivery()) -> publish_result()).
--spec(delivery/4 ::
+         properties_input(), body_input()) ->
+            publish_result().
+-spec publish(rabbit_types:delivery()) -> publish_result().
+-spec delivery
         (boolean(), boolean(), rabbit_types:message(), undefined | integer()) ->
-                         rabbit_types:delivery()).
--spec(message/4 ::
-        (rabbit_exchange:name(), rabbit_router:routing_key(),
-         properties_input(), binary()) -> rabbit_types:message()).
--spec(message/3 ::
+            rabbit_types:delivery().
+-spec message
+        (rabbit_exchange:name(), rabbit_router:routing_key(), properties_input(),
+         binary()) ->
+            rabbit_types:message().
+-spec message
         (rabbit_exchange:name(), rabbit_router:routing_key(),
          rabbit_types:decoded_content()) ->
-                        rabbit_types:ok_or_error2(rabbit_types:message(), any())).
--spec(properties/1 ::
-        (properties_input()) -> rabbit_framing:amqp_property_record()).
-
--spec(prepend_table_header/3 ::
-        (binary(), rabbit_framing:amqp_table(), headers()) -> headers()).
-
--spec(header/2 ::
-        (header(), headers()) -> 'undefined' | any()).
--spec(header/3 ::
-        (header(), headers(), any()) -> 'undefined' | any()).
-
--spec(extract_headers/1 :: (rabbit_types:content()) -> headers()).
-
--spec(map_headers/2 :: (fun((headers()) -> headers()), rabbit_types:content())
-                       -> rabbit_types:content()).
-
--spec(header_routes/1 ::
-        (undefined | rabbit_framing:amqp_table()) -> [string()]).
--spec(build_content/2 :: (rabbit_framing:amqp_property_record(),
-                          binary() | [binary()]) -> rabbit_types:content()).
--spec(from_content/1 :: (rabbit_types:content()) ->
-                             {rabbit_framing:amqp_property_record(), binary()}).
--spec(parse_expiration/1 ::
-        (rabbit_framing:amqp_property_record())
-        -> rabbit_types:ok_or_error2('undefined' | non_neg_integer(), any())).
-
--spec(msg_size/1 :: (rabbit_types:content() | rabbit_types:message()) ->
-                         non_neg_integer()).
-
--spec(maybe_gc_large_msg/1 ::
-        (rabbit_types:content() | rabbit_types:message()) -> non_neg_integer()).
-
--endif.
+            rabbit_types:ok_or_error2(rabbit_types:message(), any()).
+-spec properties
+        (properties_input()) -> rabbit_framing:amqp_property_record().
+
+-spec prepend_table_header
+        (binary(), rabbit_framing:amqp_table(), headers()) -> headers().
+
+-spec header(header(), headers()) -> 'undefined' | any().
+-spec header(header(), headers(), any()) -> 'undefined' | any().
+
+-spec extract_headers(rabbit_types:content()) -> headers().
+
+-spec map_headers
+        (fun((headers()) -> headers()), rabbit_types:content()) ->
+            rabbit_types:content().
+
+-spec header_routes(undefined | rabbit_framing:amqp_table()) -> [string()].
+-spec build_content
+        (rabbit_framing:amqp_property_record(), binary() | [binary()]) ->
+            rabbit_types:content().
+-spec from_content
+        (rabbit_types:content()) ->
+            {rabbit_framing:amqp_property_record(), binary()}.
+-spec parse_expiration
+        (rabbit_framing:amqp_property_record()) ->
+            rabbit_types:ok_or_error2('undefined' | non_neg_integer(), any()).
+
+-spec msg_size
+        (rabbit_types:content() | rabbit_types:message()) -> non_neg_integer().
+
+-spec maybe_gc_large_msg
+        (rabbit_types:content() | rabbit_types:message()) -> non_neg_integer().
 
 %%----------------------------------------------------------------------------
 
index a2a80d7b0efe045dec90a86c3200c742da7dd1dc..95d06ff5f52acbecfdecd84f0b9c1daa360b26ca 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
+-type frame() :: [binary()].
 
--type(frame() :: [binary()]).
-
--spec(build_simple_method_frame/3 ::
+-spec build_simple_method_frame
         (rabbit_channel:channel_number(), rabbit_framing:amqp_method_record(),
-         rabbit_types:protocol())
-        -> frame()).
--spec(build_simple_content_frames/4 ::
+         rabbit_types:protocol()) ->
+            frame().
+-spec build_simple_content_frames
         (rabbit_channel:channel_number(), rabbit_types:content(),
-         non_neg_integer(), rabbit_types:protocol())
-        -> [frame()]).
--spec(build_heartbeat_frame/0 :: () -> frame()).
--spec(generate_table/1 :: (rabbit_framing:amqp_table()) -> binary()).
--spec(check_empty_frame_size/0 :: () -> 'ok').
--spec(ensure_content_encoded/2 ::
+         non_neg_integer(), rabbit_types:protocol()) ->
+            [frame()].
+-spec build_heartbeat_frame() -> frame().
+-spec generate_table(rabbit_framing:amqp_table()) -> binary().
+-spec check_empty_frame_size() -> 'ok'.
+-spec ensure_content_encoded
         (rabbit_types:content(), rabbit_types:protocol()) ->
-                                       rabbit_types:encoded_content()).
--spec(clear_encoded_content/1 ::
-        (rabbit_types:content()) -> rabbit_types:unencoded_content()).
--spec(map_exception/3 :: (rabbit_channel:channel_number(),
-                          rabbit_types:amqp_error() | any(),
-                          rabbit_types:protocol()) ->
-                              {rabbit_channel:channel_number(),
-                               rabbit_framing:amqp_method_record()}).
-
--endif.
+            rabbit_types:encoded_content().
+-spec clear_encoded_content
+        (rabbit_types:content()) ->
+            rabbit_types:unencoded_content().
+-spec map_exception
+        (rabbit_channel:channel_number(), rabbit_types:amqp_error() | any(),
+         rabbit_types:protocol()) ->
+            {rabbit_channel:channel_number(),
+             rabbit_framing:amqp_method_record()}.
 
 %%----------------------------------------------------------------------------
 
index db8aca907d988f1ac4ab47ba8ed6ddd33d865689..b84e1203f9b1fa9ea76943be1596c63cf1551c94 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(parse_table/1 :: (binary()) -> rabbit_framing:amqp_table()).
--spec(ensure_content_decoded/1 ::
-        (rabbit_types:content()) -> rabbit_types:decoded_content()).
--spec(clear_decoded_content/1 ::
-        (rabbit_types:content()) -> rabbit_types:undecoded_content()).
--spec(validate_utf8/1 :: (binary()) -> 'ok' | 'error').
--spec(assert_utf8/1 :: (binary()) -> 'ok').
-
--endif.
+-spec parse_table(binary()) -> rabbit_framing:amqp_table().
+-spec ensure_content_decoded
+        (rabbit_types:content()) ->
+            rabbit_types:decoded_content().
+-spec clear_decoded_content
+        (rabbit_types:content()) ->
+            rabbit_types:undecoded_content().
+-spec validate_utf8(binary()) -> 'ok' | 'error'.
+-spec assert_utf8(binary()) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index 13520d93b65bf1fa812047261830a9761d1c2d44..ab7d38ddece493693d0640eb552b5fdffe110d9d 100644 (file)
@@ -56,7 +56,7 @@
 -export([send_command/2, deliver/4, deliver_reply/2,
          send_credit_reply/2, send_drained/2]).
 -export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1,
-         info_all/3]).
+         info_all/3, info_local/1]).
 -export([refresh_config_local/0, ready_for_close/1]).
 -export([force_event_refresh/1]).
 
          acks_uncommitted,
          prefetch_count,
          global_prefetch_count,
-         state]).
+         state,
+         reductions,
+         garbage_collection]).
 
 -define(CREATION_EVENT_KEYS,
         [pid,
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([channel_number/0]).
 
--type(channel_number() :: non_neg_integer()).
+-type channel_number() :: non_neg_integer().
 
 -export_type([channel/0]).
 
--type(channel() :: #ch{}).
-
--spec(start_link/11 ::
-        (channel_number(), pid(), pid(), pid(), string(),
-         rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
-         rabbit_framing:amqp_table(), pid(), pid()) ->
-                            rabbit_types:ok_pid_or_error()).
--spec(do/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(do/3 :: (pid(), rabbit_framing:amqp_method_record(),
-               rabbit_types:maybe(rabbit_types:content())) -> 'ok').
--spec(do_flow/3 :: (pid(), rabbit_framing:amqp_method_record(),
-                    rabbit_types:maybe(rabbit_types:content())) -> 'ok').
--spec(flush/1 :: (pid()) -> 'ok').
--spec(shutdown/1 :: (pid()) -> 'ok').
--spec(send_command/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(deliver/4 ::
-        (pid(), rabbit_types:ctag(), boolean(), rabbit_amqqueue:qmsg())
-        -> 'ok').
--spec(deliver_reply/2 :: (binary(), rabbit_types:delivery()) -> 'ok').
--spec(deliver_reply_local/3 ::
-        (pid(), binary(), rabbit_types:delivery()) -> 'ok').
--spec(send_credit_reply/2 :: (pid(), non_neg_integer()) -> 'ok').
--spec(send_drained/2 :: (pid(), [{rabbit_types:ctag(), non_neg_integer()}])
-                        -> 'ok').
--spec(list/0 :: () -> [pid()]).
--spec(list_local/0 :: () -> [pid()]).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (pid()) -> rabbit_types:infos()).
--spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()).
--spec(info_all/0 :: () -> [rabbit_types:infos()]).
--spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
--spec(info_all/3 :: (rabbit_types:info_keys(), reference(), pid()) -> 'ok').
--spec(refresh_config_local/0 :: () -> 'ok').
--spec(ready_for_close/1 :: (pid()) -> 'ok').
--spec(force_event_refresh/1 :: (reference()) -> 'ok').
-
--endif.
+-type channel() :: #ch{}.
+
+-spec start_link
+        (channel_number(), pid(), pid(), pid(), string(), rabbit_types:protocol(),
+         rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
+         pid(), pid()) ->
+            rabbit_types:ok_pid_or_error().
+-spec do(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec do
+        (pid(), rabbit_framing:amqp_method_record(),
+         rabbit_types:maybe(rabbit_types:content())) ->
+            'ok'.
+-spec do_flow
+        (pid(), rabbit_framing:amqp_method_record(),
+         rabbit_types:maybe(rabbit_types:content())) ->
+            'ok'.
+-spec flush(pid()) -> 'ok'.
+-spec shutdown(pid()) -> 'ok'.
+-spec send_command(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec deliver
+        (pid(), rabbit_types:ctag(), boolean(), rabbit_amqqueue:qmsg()) -> 'ok'.
+-spec deliver_reply(binary(), rabbit_types:delivery()) -> 'ok'.
+-spec deliver_reply_local(pid(), binary(), rabbit_types:delivery()) -> 'ok'.
+-spec send_credit_reply(pid(), non_neg_integer()) -> 'ok'.
+-spec send_drained(pid(), [{rabbit_types:ctag(), non_neg_integer()}]) -> 'ok'.
+-spec list() -> [pid()].
+-spec list_local() -> [pid()].
+-spec info_keys() -> rabbit_types:info_keys().
+-spec info(pid()) -> rabbit_types:infos().
+-spec info(pid(), rabbit_types:info_keys()) -> rabbit_types:infos().
+-spec info_all() -> [rabbit_types:infos()].
+-spec info_all(rabbit_types:info_keys()) -> [rabbit_types:infos()].
+-spec info_all(rabbit_types:info_keys(), reference(), pid()) -> 'ok'.
+-spec refresh_config_local() -> 'ok'.
+-spec ready_for_close(pid()) -> 'ok'.
+-spec force_event_refresh(reference()) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
@@ -327,6 +326,9 @@ info_all() ->
 info_all(Items) ->
     rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list()).
 
+info_local(Items) ->
+    rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list_local()).
+
 info_all(Items, Ref, AggregatorPid) ->
     rabbit_control_misc:emitting_map_with_exit_handler(
       AggregatorPid, Ref, fun(C) -> info(C, Items) end, list()).
@@ -607,14 +609,8 @@ handle_pre_hibernate(State) ->
                 end),
     {hibernate, rabbit_event:stop_stats_timer(State, #ch.stats_timer)}.
 
-terminate(Reason, State) ->
-    {Res, _State1} = notify_queues(State),
-    case Reason of
-        normal            -> ok = Res;
-        shutdown          -> ok = Res;
-        {shutdown, _Term} -> ok = Res;
-        _                 -> ok
-    end,
+terminate(_Reason, State) ->
+    {_Res, _State1} = notify_queues(State),
     pg_local:leave(rabbit_channels, self()),
     rabbit_event:if_enabled(State, #ch.stats_timer,
                             fun() -> emit_stats(State) end),
@@ -681,14 +677,12 @@ handle_exception(Reason, State = #ch{protocol     = Protocol,
             {stop, normal, State1}
     end.
 
--ifdef(use_specs).
--spec(precondition_failed/1 :: (string()) -> no_return()).
--endif.
+-spec precondition_failed(string()) -> no_return().
+
 precondition_failed(Format) -> precondition_failed(Format, []).
 
--ifdef(use_specs).
--spec(precondition_failed/2 :: (string(), [any()]) -> no_return()).
--endif.
+-spec precondition_failed(string(), [any()]) -> no_return().
+
 precondition_failed(Format, Params) ->
     rabbit_misc:protocol_error(precondition_failed, Format, Params).
 
@@ -1982,6 +1976,11 @@ i(state,                   #ch{state = State})            -> State;
 i(prefetch_count,          #ch{consumer_prefetch = C})    -> C;
 i(global_prefetch_count, #ch{limiter = Limiter}) ->
     rabbit_limiter:get_prefetch_limit(Limiter);
+i(garbage_collection, _State) ->
+    rabbit_misc:get_gc_info(self());
+i(reductions, _State) ->
+    {reductions, Reductions} = erlang:process_info(self(), reductions),
+    Reductions;
 i(Item, _) ->
     throw({bad_argument, Item}).
 
index 9793459c4588003de6179d63fda14605b9156237..909bf4aecceeb7a664936136c091ded2b1eeb985 100644 (file)
@@ -21,8 +21,6 @@
 
 -export([init/1, intercept_in/3]).
 
--ifdef(use_specs).
-
 -type(method_name() :: rabbit_framing:amqp_method_name()).
 -type(original_method() :: rabbit_framing:amqp_method_record()).
 -type(processed_method() :: rabbit_framing:amqp_method_record()).
     rabbit_misc:channel_or_connection_exit().
 -callback applies_to() -> list(method_name()).
 
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{description, 0}, {init, 1}, {intercept, 3}, {applies_to, 0}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
-
 init(Ch) ->
     Mods = [M || {_, M} <- rabbit_registry:lookup_all(channel_interceptor)],
     check_no_overlap(Mods),
index 49601dbce49f1e18dbdad690ed1c16dedfb69893..5adf7b1b5f7940d404388e8269f844eb3227981f 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([frame/0]).
 
--type(frame_type() :: ?FRAME_METHOD | ?FRAME_HEADER | ?FRAME_BODY |
+-type frame_type() :: ?FRAME_METHOD | ?FRAME_HEADER | ?FRAME_BODY |
                       ?FRAME_OOB_METHOD | ?FRAME_OOB_HEADER | ?FRAME_OOB_BODY |
-                      ?FRAME_TRACE | ?FRAME_HEARTBEAT).
--type(protocol()   :: rabbit_framing:protocol()).
--type(method()     :: rabbit_framing:amqp_method_record()).
--type(class_id()   :: rabbit_framing:amqp_class_id()).
--type(weight()     :: non_neg_integer()).
--type(body_size()  :: non_neg_integer()).
--type(content()    :: rabbit_types:undecoded_content()).
-
--type(frame() ::
+                      ?FRAME_TRACE | ?FRAME_HEARTBEAT.
+-type protocol()   :: rabbit_framing:protocol().
+-type method()     :: rabbit_framing:amqp_method_record().
+-type class_id()   :: rabbit_framing:amqp_class_id().
+-type weight()     :: non_neg_integer().
+-type body_size()  :: non_neg_integer().
+-type content()    :: rabbit_types:undecoded_content().
+
+-type frame() ::
         {'method',         rabbit_framing:amqp_method_name(), binary()} |
         {'content_header', class_id(), weight(), body_size(), binary()} |
-        {'content_body',   binary()}).
+        {'content_body',   binary()}.
 
--type(state() ::
+-type state() ::
         {'method',         protocol()} |
         {'content_header', method(), class_id(), protocol()} |
-        {'content_body',   method(), body_size(), class_id(), protocol()}).
-
--spec(analyze_frame/3 :: (frame_type(), binary(), protocol()) ->
-                              frame() | 'heartbeat' | 'error').
+        {'content_body',   method(), body_size(), class_id(), protocol()}.
 
--spec(init/1 :: (protocol()) -> {ok, state()}).
--spec(process/2 :: (frame(), state()) ->
-                        {ok, state()} |
-                        {ok, method(), state()} |
-                        {ok, method(), content(), state()} |
-                        {error, rabbit_types:amqp_error()}).
+-spec analyze_frame(frame_type(), binary(), protocol()) ->
+          frame() | 'heartbeat' | 'error'.
 
--endif.
+-spec init(protocol()) -> {ok, state()}.
+-spec process(frame(), state()) ->
+          {ok, state()} |
+          {ok, method(), state()} |
+          {ok, method(), content(), state()} |
+          {error, rabbit_types:amqp_error()}.
 
 %%--------------------------------------------------------------------
 
index 758e314d498c8f32e84cf305fd0d46b41ef7a3f6..a1a47682c3a9e0f2b88296aad1b9c1509e1a0373 100644 (file)
@@ -2,12 +2,13 @@
 
 {application, rabbit_common, [
        {description, ""},
-       {vsn, "3.6.1"},
+       {vsn, "3.6.5"},
        {id, "git"},
        {modules, []},
        {registered, []},
        {applications, [
                kernel,
-               stdlib
+                stdlib,
+                xmerl
        ]}
 ]}.
index 2e274e858b0b3f0326e56eabfefce2a1bc08ddec..2e1f6cc81bfa53e55a18edd59186b004de95d2c2 100644 (file)
          emitting_map_with_exit_handler/5, wait_for_info_messages/5,
          print_cmd_result/2]).
 
--ifdef(use_specs).
-
--spec(emitting_map/4 :: (pid(), reference(), fun(), list()) -> 'ok').
--spec(emitting_map/5 :: (pid(), reference(), fun(), list(), atom()) -> 'ok').
--spec(emitting_map_with_exit_handler/4 ::
-        (pid(), reference(), fun(), list()) -> 'ok').
--spec(emitting_map_with_exit_handler/5 ::
-        (pid(), reference(), fun(), list(), atom()) -> 'ok').
--spec(print_cmd_result/2 :: (atom(), term()) -> 'ok').
-
--endif.
+-spec emitting_map(pid(), reference(), fun(), list()) -> 'ok'.
+-spec emitting_map(pid(), reference(), fun(), list(), atom()) -> 'ok'.
+-spec emitting_map_with_exit_handler
+        (pid(), reference(), fun(), list()) -> 'ok'.
+-spec emitting_map_with_exit_handler
+        (pid(), reference(), fun(), list(), atom()) -> 'ok'.
+-spec print_cmd_result(atom(), term()) -> 'ok'.
 
 emitting_map(AggregatorPid, Ref, Fun, List) ->
     emitting_map(AggregatorPid, Ref, Fun, List, continue),
@@ -93,4 +89,5 @@ wait_for_info_messages(Ref, InfoItemKeys, DisplayFun) when is_reference(Ref) ->
 notify_if_timeout(Pid, Ref, Timeout) ->
     timer:send_after(Timeout, Pid, {Ref, {timeout, Timeout}}).
 
-print_cmd_result(authenticate_user, _Result) -> io:format("Success~n").
+print_cmd_result(authenticate_user, _Result) -> io:format("Success~n");
+print_cmd_result(join_cluster, already_member) -> io:format("The node is already a member of this cluster~n").
diff --git a/rabbitmq-server/deps/rabbit_common/src/rabbit_ct_broker_helpers.erl b/rabbitmq-server/deps/rabbit_common/src/rabbit_ct_broker_helpers.erl
new file mode 100644 (file)
index 0000000..05ede9d
--- /dev/null
@@ -0,0 +1,1037 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_ct_broker_helpers).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/inet.hrl").
+-include("include/rabbit.hrl").
+
+-export([
+    setup_steps/0,
+    teardown_steps/0,
+    start_rabbitmq_nodes/1,
+    stop_rabbitmq_nodes/1,
+    rewrite_node_config_file/2,
+    cluster_nodes/1, cluster_nodes/2,
+
+    get_node_configs/1, get_node_configs/2,
+    get_node_config/2, get_node_config/3, set_node_config/3,
+    nodename_to_index/2,
+    node_uri/2, node_uri/3,
+
+    control_action/2, control_action/3, control_action/4,
+    rabbitmqctl/3, rabbitmqctl_list/3,
+
+    add_code_path_to_node/2,
+    add_code_path_to_all_nodes/2,
+    rpc/5, rpc/6,
+    rpc_all/4, rpc_all/5,
+
+    start_node/2,
+    start_broker/2,
+    restart_broker/2,
+    stop_broker/2,
+    restart_node/2,
+    stop_node/2,
+    stop_node_after/3,
+    kill_node/2,
+    kill_node_after/3,
+
+    set_partition_handling_mode/3,
+    set_partition_handling_mode_globally/2,
+    enable_dist_proxy_manager/1,
+    enable_dist_proxy/1,
+    enable_dist_proxy_on_node/3,
+    block_traffic_between/2,
+    allow_traffic_between/2,
+
+    get_connection_pids/1,
+    get_queue_sup_pid/1,
+
+    set_policy/6,
+    clear_policy/3,
+    set_ha_policy/4, set_ha_policy/5,
+    set_ha_policy_all/1,
+    set_ha_policy_two_pos/1,
+    set_ha_policy_two_pos_batch_sync/1,
+
+    set_parameter/5,
+    clear_parameter/4,
+
+    enable_plugin/3,
+    disable_plugin/3,
+
+    test_channel/0
+  ]).
+
+%% Internal functions exported to be used by rpc:call/4.
+-export([
+    do_restart_broker/0
+  ]).
+
+-define(DEFAULT_USER, "guest").
+-define(NODE_START_ATTEMPTS, 10).
+
+-define(TCP_PORTS_BASE, 21000).
+-define(TCP_PORTS_LIST, [
+    tcp_port_amqp,
+    tcp_port_amqp_tls,
+    tcp_port_mgmt,
+    tcp_port_erlang_dist,
+    tcp_port_erlang_dist_proxy,
+    tcp_port_mqtt,
+    tcp_port_mqtt_tls,
+    tcp_port_web_mqtt,
+    tcp_port_stomp,
+    tcp_port_stomp_tls,
+    tcp_port_web_stomp
+  ]).
+
+%% -------------------------------------------------------------------
+%% Broker setup/teardown steps.
+%% -------------------------------------------------------------------
+
+setup_steps() ->
+    [
+      fun run_make_dist/1,
+      fun start_rabbitmq_nodes/1,
+      fun share_dist_and_proxy_ports_map/1
+    ].
+
+teardown_steps() ->
+    [
+      fun stop_rabbitmq_nodes/1
+    ].
+
+run_make_dist(Config) ->
+    SrcDir = ?config(current_srcdir, Config),
+    case rabbit_ct_helpers:make(Config, SrcDir, ["test-dist"]) of
+        {ok, _} -> Config;
+        _       -> {skip, "Failed to run \"make test-dist\""}
+    end.
+
+start_rabbitmq_nodes(Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_username, list_to_binary(?DEFAULT_USER)},
+        {rmq_password, list_to_binary(?DEFAULT_USER)},
+        {rmq_hostname, "localhost"},
+        {rmq_vhost, <<"/">>},
+        {rmq_channel_max, 0}]),
+    NodesCount0 = rabbit_ct_helpers:get_config(Config1, rmq_nodes_count),
+    NodesCount = case NodesCount0 of
+        undefined                                -> 1;
+        N when is_integer(N) andalso N >= 1      -> N;
+        L when is_list(L) andalso length(L) >= 1 -> length(L)
+    end,
+    Clustered0 = rabbit_ct_helpers:get_config(Config1, rmq_nodes_clustered),
+    Clustered = case Clustered0 of
+        undefined            -> true;
+        C when is_boolean(C) -> C
+    end,
+    Master = self(),
+    Starters = [
+      spawn_link(fun() -> start_rabbitmq_node(Master, Config1, [], I) end)
+      || I <- lists:seq(0, NodesCount - 1)
+    ],
+    wait_for_rabbitmq_nodes(Config1, Starters, [], Clustered).
+
+wait_for_rabbitmq_nodes(Config, [], NodeConfigs, Clustered) ->
+    NodeConfigs1 = [NC || {_, NC} <- lists:keysort(1, NodeConfigs)],
+    Config1 = rabbit_ct_helpers:set_config(Config, {rmq_nodes, NodeConfigs1}),
+    if
+        Clustered -> cluster_nodes(Config1);
+        true      -> Config1
+    end;
+wait_for_rabbitmq_nodes(Config, Starting, NodeConfigs, Clustered) ->
+    receive
+        {_, {skip, _} = Error} ->
+            NodeConfigs1 = [NC || {_, NC} <- NodeConfigs],
+            Config1 = rabbit_ct_helpers:set_config(Config,
+              {rmq_nodes, NodeConfigs1}),
+            stop_rabbitmq_nodes(Config1),
+            Error;
+        {Pid, I, NodeConfig} when NodeConfigs =:= [] ->
+            wait_for_rabbitmq_nodes(Config, Starting -- [Pid],
+              [{I, NodeConfig} | NodeConfigs], Clustered);
+        {Pid, I, NodeConfig} ->
+            wait_for_rabbitmq_nodes(Config, Starting -- [Pid],
+              [{I, NodeConfig} | NodeConfigs], Clustered)
+    end.
+
+%% To start a RabbitMQ node, we need to:
+%%   1. Pick TCP port numbers
+%%   2. Generate a node name
+%%   3. Write a configuration file
+%%   4. Start the node
+%%
+%% If this fails (usually because the node name is taken or a TCP port
+%% is already in use), we start again with another set of TCP ports. The
+%% node name is derived from the AMQP TCP port so a new node name is
+%% generated.
+
+start_rabbitmq_node(Master, Config, NodeConfig, I) ->
+    Attempts0 = rabbit_ct_helpers:get_config(NodeConfig, failed_boot_attempts),
+    Attempts = case Attempts0 of
+        undefined -> 0;
+        N         -> N
+    end,
+    NodeConfig1 = init_tcp_port_numbers(Config, NodeConfig, I),
+    NodeConfig2 = init_nodename(Config, NodeConfig1, I),
+    NodeConfig3 = init_config_filename(Config, NodeConfig2, I),
+    Steps = [
+      fun write_config_file/3,
+      fun do_start_rabbitmq_node/3
+    ],
+    case run_node_steps(Config, NodeConfig3, I, Steps) of
+        {skip, _} = Error
+        when Attempts >= ?NODE_START_ATTEMPTS ->
+            %% It's unlikely we'll ever succeed to start RabbitMQ.
+            Master ! {self(), Error},
+            unlink(Master);
+        {skip, _} ->
+            %% Try again with another TCP port numbers base.
+            NodeConfig4 = move_nonworking_nodedir_away(NodeConfig3),
+            NodeConfig5 = rabbit_ct_helpers:set_config(NodeConfig4,
+              {failed_boot_attempts, Attempts + 1}),
+            start_rabbitmq_node(Master, Config, NodeConfig5, I);
+        NodeConfig4 ->
+            Master ! {self(), I, NodeConfig4},
+            unlink(Master)
+    end.
+
+run_node_steps(Config, NodeConfig, I, [Step | Rest]) ->
+    case Step(Config, NodeConfig, I) of
+        {skip, _} = Error -> Error;
+        NodeConfig1       -> run_node_steps(Config, NodeConfig1, I, Rest)
+    end;
+run_node_steps(_, NodeConfig, _, []) ->
+    NodeConfig.
+
+init_tcp_port_numbers(Config, NodeConfig, I) ->
+    %% If there is no TCP port numbers base previously calculated,
+    %% use the TCP port 21000. If a base was previously calculated,
+    %% increment it by the number of TCP ports we may open.
+    %%
+    %% Port 21000 is an arbitrary choice. We don't want to use the
+    %% default AMQP port of 5672 so other AMQP clients on the same host
+    %% do not accidentally use the testsuite broker. There seems to be
+    %% no registered service around this port in /etc/services. And it
+    %% should be far enough away from the default ephemeral TCP ports
+    %% range.
+    ExtraPorts = case rabbit_ct_helpers:get_config(Config, rmq_extra_tcp_ports) of
+        undefined           -> [];
+        EP when is_list(EP) -> EP
+    end,
+    PortsCount = length(?TCP_PORTS_LIST) + length(ExtraPorts),
+    Base = case rabbit_ct_helpers:get_config(NodeConfig, tcp_ports_base) of
+        undefined -> tcp_port_base_for_broker(Config, I, PortsCount);
+        P         -> P + PortsCount
+    end,
+    NodeConfig1 = rabbit_ct_helpers:set_config(NodeConfig,
+      {tcp_ports_base, Base}),
+    %% Now, compute all TCP port numbers from this base.
+    {NodeConfig2, _} = lists:foldl(
+      fun(PortName, {NewConfig, NextPort}) ->
+          {
+            rabbit_ct_helpers:set_config(NewConfig, {PortName, NextPort}),
+            NextPort + 1
+          }
+      end,
+      {NodeConfig1, Base}, ?TCP_PORTS_LIST ++ ExtraPorts),
+    %% Finally, update the RabbitMQ configuration with the computed TCP
+    %% port numbers. Extra TCP ports are not added automatically to the
+    %% configuration.
+    update_tcp_ports_in_rmq_config(NodeConfig2, ?TCP_PORTS_LIST).
+
+tcp_port_base_for_broker(Config, I, PortsCount) ->
+    Base = case rabbit_ct_helpers:get_config(Config, tcp_ports_base) of
+        undefined ->
+            ?TCP_PORTS_BASE;
+        {skip_n_nodes, N} ->
+            tcp_port_base_for_broker1(?TCP_PORTS_BASE, N, PortsCount);
+        B ->
+            B
+    end,
+    tcp_port_base_for_broker1(Base, I, PortsCount).
+
+tcp_port_base_for_broker1(Base, I, PortsCount) ->
+    Base + I * PortsCount * ?NODE_START_ATTEMPTS.
+
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_amqp = Key | Rest]) ->
+    NodeConfig1 = rabbit_ct_helpers:merge_app_env(NodeConfig,
+      {rabbit, [{tcp_listeners, [?config(Key, NodeConfig)]}]}),
+    update_tcp_ports_in_rmq_config(NodeConfig1, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_amqp_tls = Key | Rest]) ->
+    NodeConfig1 = rabbit_ct_helpers:merge_app_env(NodeConfig,
+      {rabbit, [{ssl_listeners, [?config(Key, NodeConfig)]}]}),
+    update_tcp_ports_in_rmq_config(NodeConfig1, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_mgmt = Key | Rest]) ->
+    NodeConfig1 = rabbit_ct_helpers:merge_app_env(NodeConfig,
+      {rabbitmq_management, [{listener, [{port, ?config(Key, NodeConfig)}]}]}),
+    update_tcp_ports_in_rmq_config(NodeConfig1, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_mqtt = Key | Rest]) ->
+    NodeConfig1 = rabbit_ct_helpers:merge_app_env(NodeConfig,
+      {rabbitmq_mqtt, [{tcp_listeners, [?config(Key, NodeConfig)]}]}),
+    update_tcp_ports_in_rmq_config(NodeConfig1, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_mqtt_tls = Key | Rest]) ->
+    NodeConfig1 = rabbit_ct_helpers:merge_app_env(NodeConfig,
+      {rabbitmq_mqtt, [{ssl_listeners, [?config(Key, NodeConfig)]}]}),
+    update_tcp_ports_in_rmq_config(NodeConfig1, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_web_mqtt = Key | Rest]) ->
+    NodeConfig1 = rabbit_ct_helpers:merge_app_env(NodeConfig,
+      {rabbitmq_web_mqtt, [{tcp_config, [{port, ?config(Key, NodeConfig)}]}]}),
+    update_tcp_ports_in_rmq_config(NodeConfig1, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_web_stomp = Key | Rest]) ->
+    NodeConfig1 = rabbit_ct_helpers:merge_app_env(NodeConfig,
+      {rabbitmq_web_stomp, [{tcp_config, [{port, ?config(Key, NodeConfig)}]}]}),
+    update_tcp_ports_in_rmq_config(NodeConfig1, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_stomp = Key | Rest]) ->
+    NodeConfig1 = rabbit_ct_helpers:merge_app_env(NodeConfig,
+      {rabbitmq_stomp, [{tcp_listeners, [?config(Key, NodeConfig)]}]}),
+    update_tcp_ports_in_rmq_config(NodeConfig1, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_stomp_tls = Key | Rest]) ->
+    NodeConfig1 = rabbit_ct_helpers:merge_app_env(NodeConfig,
+      {rabbitmq_stomp, [{ssl_listeners, [?config(Key, NodeConfig)]}]}),
+    update_tcp_ports_in_rmq_config(NodeConfig1, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_erlang_dist | Rest]) ->
+    %% The Erlang distribution port doesn't appear in the configuration file.
+    update_tcp_ports_in_rmq_config(NodeConfig, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_erlang_dist_proxy | Rest]) ->
+    %% inet_proxy_dist port doesn't appear in the configuration file.
+    update_tcp_ports_in_rmq_config(NodeConfig, Rest);
+update_tcp_ports_in_rmq_config(NodeConfig, []) ->
+    NodeConfig.
+
+init_nodename(Config, NodeConfig, I) ->
+    Nodename0 = case rabbit_ct_helpers:get_config(Config, rmq_nodes_count) of
+        NodesList when is_list(NodesList) ->
+            Name = lists:nth(I + 1, NodesList),
+            rabbit_misc:format("~s@localhost", [Name]);
+        _ ->
+            Base = ?config(tcp_ports_base, NodeConfig),
+            Suffix0 = rabbit_ct_helpers:get_config(Config,
+              rmq_nodename_suffix),
+            Suffix = case Suffix0 of
+                undefined               -> "";
+                _ when is_atom(Suffix0) -> [$- | atom_to_list(Suffix0)];
+                _                       -> [$- | Suffix0]
+            end,
+            rabbit_misc:format("rmq-ct~s-~b-~b@localhost",
+              [Suffix, I + 1, Base])
+    end,
+    Nodename = list_to_atom(Nodename0),
+    rabbit_ct_helpers:set_config(NodeConfig, [
+        {nodename, Nodename},
+        {initial_nodename, Nodename}
+      ]).
+
+init_config_filename(Config, NodeConfig, _I) ->
+    PrivDir = ?config(priv_dir, Config),
+    Nodename = ?config(nodename, NodeConfig),
+    ConfigDir = filename:join(PrivDir, Nodename),
+    ConfigFile = filename:join(ConfigDir, Nodename),
+    rabbit_ct_helpers:set_config(NodeConfig,
+      {erlang_node_config_filename, ConfigFile}).
+
+write_config_file(Config, NodeConfig, _I) ->
+    %% Prepare a RabbitMQ configuration.
+    ErlangConfigBase = ?config(erlang_node_config, Config),
+    ErlangConfigOverlay = ?config(erlang_node_config, NodeConfig),
+    ErlangConfig = rabbit_ct_helpers:merge_app_env_in_erlconf(ErlangConfigBase,
+      ErlangConfigOverlay),
+    ConfigFile = ?config(erlang_node_config_filename, NodeConfig),
+    ConfigDir = filename:dirname(ConfigFile),
+    Ret1 = file:make_dir(ConfigDir),
+    Ret2 = file:write_file(ConfigFile ++ ".config",
+      io_lib:format("% vim:ft=erlang:~n~n~p.~n", [ErlangConfig])),
+    case {Ret1, Ret2} of
+        {ok, ok} ->
+            NodeConfig;
+        {{error, eexist}, ok} ->
+            NodeConfig;
+        {{error, Reason}, _} when Reason =/= eexist ->
+            {skip, "Failed to create Erlang node config directory \"" ++
+             ConfigDir ++ "\": " ++ file:format_error(Reason)};
+        {_, {error, Reason}} ->
+            {skip, "Failed to create Erlang node config file \"" ++
+             ConfigFile ++ "\": " ++ file:format_error(Reason)}
+    end.
+
+do_start_rabbitmq_node(Config, NodeConfig, I) ->
+    WithPlugins0 = rabbit_ct_helpers:get_config(Config,
+      broker_with_plugins),
+    WithPlugins = case is_list(WithPlugins0) of
+        true  -> lists:nth(I + 1, WithPlugins0);
+        false -> WithPlugins0
+    end,
+    SrcDir = case WithPlugins of
+        false -> ?config(rabbit_srcdir, Config);
+        _     -> ?config(current_srcdir, Config)
+    end,
+    PrivDir = ?config(priv_dir, Config),
+    Nodename = ?config(nodename, NodeConfig),
+    InitialNodename = ?config(initial_nodename, NodeConfig),
+    DistPort = ?config(tcp_port_erlang_dist, NodeConfig),
+    ConfigFile = ?config(erlang_node_config_filename, NodeConfig),
+    %% Use inet_proxy_dist to handle distribution. This is used by the
+    %% partitions testsuite.
+    DistMod = rabbit_ct_helpers:get_config(Config, erlang_dist_module),
+    StartArgs0 = case DistMod of
+        undefined ->
+            "";
+        _ ->
+            DistModS = atom_to_list(DistMod),
+            DistModPath = filename:absname(
+              filename:dirname(code:where_is_file(DistModS ++ ".beam"))),
+            DistArg = re:replace(DistModS, "_dist$", "", [{return, list}]),
+            "-pa \"" ++ DistModPath ++ "\" -proto_dist " ++ DistArg
+    end,
+    %% Set the net_ticktime.
+    CurrentTicktime = case net_kernel:get_net_ticktime() of
+        {ongoing_change_to, T} -> T;
+        T                      -> T
+    end,
+    StartArgs1 = case rabbit_ct_helpers:get_config(Config, net_ticktime) of
+        undefined ->
+            case CurrentTicktime of
+                60 -> ok;
+                _  -> net_kernel:set_net_ticktime(60)
+            end,
+            StartArgs0;
+        Ticktime ->
+            case CurrentTicktime of
+                Ticktime -> ok;
+                _        -> net_kernel:set_net_ticktime(Ticktime)
+            end,
+            StartArgs0 ++ " -kernel net_ticktime " ++ integer_to_list(Ticktime)
+    end,
+    Cmd = ["start-background-broker",
+      {"RABBITMQ_NODENAME=~s", [Nodename]},
+      {"RABBITMQ_NODENAME_FOR_PATHS=~s", [InitialNodename]},
+      {"RABBITMQ_DIST_PORT=~b", [DistPort]},
+      {"RABBITMQ_CONFIG_FILE=~s", [ConfigFile]},
+      {"RABBITMQ_SERVER_START_ARGS=~s", [StartArgs1]},
+      {"TEST_TMPDIR=~s", [PrivDir]}],
+    case rabbit_ct_helpers:make(Config, SrcDir, Cmd) of
+        {ok, _} -> query_node(Config, NodeConfig);
+        _       -> {skip, "Failed to initialize RabbitMQ"}
+    end.
+
+query_node(Config, NodeConfig) ->
+    Nodename = ?config(nodename, NodeConfig),
+    PidFile = rpc(Config, Nodename, os, getenv, ["RABBITMQ_PID_FILE"]),
+    MnesiaDir = rpc(Config, Nodename, mnesia, system_info, [directory]),
+    {ok, PluginsDir} = rpc(Config, Nodename, application, get_env,
+      [rabbit, plugins_dir]),
+    {ok, EnabledPluginsFile} = rpc(Config, Nodename, application, get_env,
+      [rabbit, enabled_plugins_file]),
+    rabbit_ct_helpers:set_config(NodeConfig, [
+        {pid_file, PidFile},
+        {mnesia_dir, MnesiaDir},
+        {plugins_dir, PluginsDir},
+        {enabled_plugins_file, EnabledPluginsFile}
+      ]).
+
+cluster_nodes(Config) ->
+    [NodeConfig1 | NodeConfigs] = get_node_configs(Config),
+    cluster_nodes1(Config, NodeConfig1, NodeConfigs).
+
+cluster_nodes(Config, Nodes) ->
+    [NodeConfig1 | NodeConfigs] = [
+      get_node_config(Config, Node) || Node <- Nodes],
+    cluster_nodes1(Config, NodeConfig1, NodeConfigs).
+
+cluster_nodes1(Config, NodeConfig1, [NodeConfig2 | Rest]) ->
+    case cluster_nodes(Config, NodeConfig2, NodeConfig1) of
+        ok    -> cluster_nodes1(Config, NodeConfig1, Rest);
+        Error -> Error
+    end;
+cluster_nodes1(Config, _, []) ->
+    Config.
+
+cluster_nodes(Config, NodeConfig1, NodeConfig2) ->
+    Nodename1 = ?config(nodename, NodeConfig1),
+    Nodename2 = ?config(nodename, NodeConfig2),
+    Cmds = [
+      ["stop_app"],
+      ["join_cluster", Nodename2],
+      ["start_app"]
+    ],
+    cluster_nodes1(Config, Nodename1, Nodename2, Cmds).
+
+cluster_nodes1(Config, Nodename1, Nodename2, [Cmd | Rest]) ->
+    case rabbitmqctl(Config, Nodename1, Cmd) of
+        {ok, _} -> cluster_nodes1(Config, Nodename1, Nodename2, Rest);
+        _       -> {skip,
+                    "Failed to cluster nodes \"" ++ atom_to_list(Nodename1) ++
+                    "\" and \"" ++ atom_to_list(Nodename2) ++ "\""}
+    end;
+cluster_nodes1(_, _, _, []) ->
+    ok.
+
+move_nonworking_nodedir_away(NodeConfig) ->
+    ConfigFile = ?config(erlang_node_config_filename, NodeConfig),
+    ConfigDir = filename:dirname(ConfigFile),
+    NewName = filename:join(
+      filename:dirname(ConfigDir),
+      "_unused_nodedir_" ++ filename:basename(ConfigDir)),
+    file:rename(ConfigDir, NewName),
+    lists:keydelete(erlang_node_config_filename, 1, NodeConfig).
+
+share_dist_and_proxy_ports_map(Config) ->
+    Map = [
+      {
+        ?config(tcp_port_erlang_dist, NodeConfig),
+        ?config(tcp_port_erlang_dist_proxy, NodeConfig)
+      } || NodeConfig <- get_node_configs(Config)],
+    rpc_all(Config,
+      application, set_env, [kernel, dist_and_proxy_ports_map, Map]),
+    Config.
+
+rewrite_node_config_file(Config, Node) ->
+    NodeConfig = get_node_config(Config, Node),
+    I = if
+        is_integer(Node) -> Node;
+        true             -> nodename_to_index(Config, Node)
+    end,
+    %% Keep copies of previous config file.
+    ConfigFile = ?config(erlang_node_config_filename, NodeConfig),
+    case rotate_config_file(ConfigFile) of
+        ok ->
+            ok;
+        {error, Reason} ->
+            ct:pal("Failed to rotate config file ~s: ~s",
+              [ConfigFile, file:format_error(Reason)])
+    end,
+    %% Now we can write the new file. The caller is responsible for
+    %% restarting the broker/node.
+    case write_config_file(Config, NodeConfig, I) of
+        {skip, Error} -> {error, Error};
+        _NodeConfig1  -> ok
+    end.
+
+rotate_config_file(ConfigFile) ->
+    rotate_config_file(ConfigFile, ConfigFile ++ ".config", 1).
+
+rotate_config_file(ConfigFile, OldName, Ext) ->
+    NewName = rabbit_misc:format("~s.config.~b", [ConfigFile, Ext]),
+    case filelib:is_file(NewName) of
+        true  ->
+            case rotate_config_file(ConfigFile, NewName, Ext + 1) of
+                ok    -> file:rename(OldName, NewName);
+                Error -> Error
+            end;
+        false ->
+            file:rename(OldName, NewName)
+    end.
+
+stop_rabbitmq_nodes(Config) ->
+    NodeConfigs = get_node_configs(Config),
+    [stop_rabbitmq_node(Config, NodeConfig) || NodeConfig <- NodeConfigs],
+    proplists:delete(rmq_nodes, Config).
+
+stop_rabbitmq_node(Config, NodeConfig) ->
+    SrcDir = ?config(current_srcdir, Config),
+    PrivDir = ?config(priv_dir, Config),
+    Nodename = ?config(nodename, NodeConfig),
+    InitialNodename = ?config(initial_nodename, NodeConfig),
+    Cmd = ["stop-rabbit-on-node", "stop-node",
+      {"RABBITMQ_NODENAME=~s", [Nodename]},
+      {"RABBITMQ_NODENAME_FOR_PATHS=~s", [InitialNodename]},
+      {"TEST_TMPDIR=~s", [PrivDir]}],
+    rabbit_ct_helpers:make(Config, SrcDir, Cmd),
+    NodeConfig.
+
+%% -------------------------------------------------------------------
+%% Helpers for partition simulation
+%% -------------------------------------------------------------------
+
+enable_dist_proxy_manager(Config) ->
+    inet_tcp_proxy_manager:start(),
+    rabbit_ct_helpers:set_config(Config,
+      {erlang_dist_module, inet_proxy_dist}).
+
+enable_dist_proxy(Config) ->
+    NodeConfigs = rabbit_ct_broker_helpers:get_node_configs(Config),
+    Nodes = [?config(nodename, NodeConfig) || NodeConfig <- NodeConfigs],
+    ManagerNode = node(),
+    ok = lists:foreach(
+      fun(NodeConfig) ->
+          ok = rabbit_ct_broker_helpers:rpc(Config,
+            ?config(nodename, NodeConfig),
+            ?MODULE, enable_dist_proxy_on_node,
+            [NodeConfig, ManagerNode, Nodes])
+      end, NodeConfigs),
+    Config.
+
+enable_dist_proxy_on_node(NodeConfig, ManagerNode, Nodes) ->
+    Nodename = ?config(nodename, NodeConfig),
+    DistPort = ?config(tcp_port_erlang_dist, NodeConfig),
+    ProxyPort = ?config(tcp_port_erlang_dist_proxy, NodeConfig),
+    ok = inet_tcp_proxy:start(ManagerNode, DistPort, ProxyPort),
+    ok = inet_tcp_proxy:reconnect(Nodes -- [Nodename]).
+
+block_traffic_between(NodeA, NodeB) ->
+    rpc:call(NodeA, inet_tcp_proxy, block, [NodeB]),
+    rpc:call(NodeB, inet_tcp_proxy, block, [NodeA]).
+
+allow_traffic_between(NodeA, NodeB) ->
+    rpc:call(NodeA, inet_tcp_proxy, allow, [NodeB]),
+    rpc:call(NodeB, inet_tcp_proxy, allow, [NodeA]).
+
+set_partition_handling_mode_globally(Config, Mode) ->
+    rabbit_ct_broker_helpers:rpc_all(Config,
+      application, set_env, [rabbit, cluster_partition_handling, Mode]).
+
+set_partition_handling_mode(Config, Nodes, Mode) ->
+    rabbit_ct_broker_helpers:rpc(Config, Nodes,
+      application, set_env, [rabbit, cluster_partition_handling, Mode]).
+
+%% -------------------------------------------------------------------
+%% Calls to rabbitmqctl from Erlang.
+%% -------------------------------------------------------------------
+
+control_action(Command, Node) ->
+    control_action(Command, Node, [], []).
+
+control_action(Command, Node, Args) ->
+    control_action(Command, Node, Args, []).
+
+control_action(Command, Node, Args, Opts) ->
+    rpc:call(Node, rabbit_control_main, action,
+             [Command, Node, Args, Opts,
+              fun (F, A) ->
+                      error_logger:info_msg(F ++ "~n", A)
+              end]).
+
+%% Use rabbitmqctl(1) instead of using the Erlang API.
+
+rabbitmqctl(Config, Node, Args) ->
+    Rabbitmqctl = ?config(rabbitmqctl_cmd, Config),
+    NodeConfig = get_node_config(Config, Node),
+    Nodename = ?config(nodename, NodeConfig),
+    Env = [
+      {"RABBITMQ_PID_FILE", ?config(pid_file, NodeConfig)},
+      {"RABBITMQ_MNESIA_DIR", ?config(mnesia_dir, NodeConfig)},
+      {"RABBITMQ_PLUGINS_DIR", ?config(plugins_dir, NodeConfig)},
+      {"RABBITMQ_ENABLED_PLUGINS_FILE",
+        ?config(enabled_plugins_file, NodeConfig)}
+    ],
+    Cmd = [Rabbitmqctl, "-n", Nodename | Args],
+    rabbit_ct_helpers:exec(Cmd, [{env, Env}]).
+
+rabbitmqctl_list(Config, Node, Args) ->
+    {ok, StdOut} = rabbitmqctl(Config, Node, Args),
+    [<<"Listing", _/binary>>|Rows] = re:split(StdOut, <<"\n">>, [trim]),
+    [re:split(Row, <<"\t">>) || Row <- Rows].
+
+%% -------------------------------------------------------------------
+%% Other helpers.
+%% -------------------------------------------------------------------
+
+get_node_configs(Config) ->
+    ?config(rmq_nodes, Config).
+
+get_node_configs(Config, Key) ->
+    NodeConfigs = get_node_configs(Config),
+    [?config(Key, NodeConfig) || NodeConfig <- NodeConfigs].
+
+get_node_config(Config, Node) when is_atom(Node) andalso Node =/= undefined ->
+    NodeConfigs = get_node_configs(Config),
+    get_node_config1(NodeConfigs, Node);
+get_node_config(Config, I) when is_integer(I) andalso I >= 0 ->
+    NodeConfigs = get_node_configs(Config),
+    lists:nth(I + 1, NodeConfigs).
+
+get_node_config1([NodeConfig | Rest], Node) ->
+    case ?config(nodename, NodeConfig) of
+        Node -> NodeConfig;
+        _    -> case ?config(initial_nodename, NodeConfig) of
+                    Node -> NodeConfig;
+                    _    -> get_node_config1(Rest, Node)
+                end
+    end;
+get_node_config1([], Node) ->
+    exit({unknown_node, Node}).
+
+get_node_config(Config, Node, Key) ->
+    NodeConfig = get_node_config(Config, Node),
+    ?config(Key, NodeConfig).
+
+set_node_config(Config, Node, Tuples) ->
+    NodeConfig = get_node_config(Config, Node),
+    NodeConfig1 = rabbit_ct_helpers:set_config(NodeConfig, Tuples),
+    replace_entire_node_config(Config, Node, NodeConfig1).
+
+replace_entire_node_config(Config, Node, NewNodeConfig) ->
+    NodeConfigs = get_node_configs(Config),
+    NodeConfigs1 = lists:map(
+      fun(NodeConfig) ->
+          Match = case ?config(nodename, NodeConfig) of
+              Node -> true;
+              _    -> case ?config(initial_nodename, NodeConfig) of
+                      Node -> true;
+                      _    -> false
+                  end
+          end,
+          if
+              Match -> NewNodeConfig;
+              true  -> NodeConfig
+          end
+      end, NodeConfigs),
+    rabbit_ct_helpers:set_config(Config, {rmq_nodes, NodeConfigs1}).
+
+nodename_to_index(Config, Node) ->
+    NodeConfigs = get_node_configs(Config),
+    nodename_to_index1(NodeConfigs, Node, 0).
+
+nodename_to_index1([NodeConfig | Rest], Node, I) ->
+    case ?config(nodename, NodeConfig) of
+        Node -> I;
+        _    -> case ?config(initial_nodename, NodeConfig) of
+                    Node -> I;
+                    _    -> nodename_to_index1(Rest, Node, I + 1)
+                end
+    end;
+nodename_to_index1([], Node, _) ->
+    exit({unknown_node, Node}).
+
+node_uri(Config, Node) ->
+    node_uri(Config, Node, []).
+
+node_uri(Config, Node, amqp) ->
+    node_uri(Config, Node, []);
+node_uri(Config, Node, management) ->
+    node_uri(Config, Node, [
+        {scheme, "http"},
+        {tcp_port_name, tcp_port_mgmt}
+      ]);
+node_uri(Config, Node, Options) ->
+    Scheme = proplists:get_value(scheme, Options, "amqp"),
+    Hostname = case proplists:get_value(use_ipaddr, Options, false) of
+        true ->
+            {ok, Hostent} = inet:gethostbyname(?config(rmq_hostname, Config)),
+            format_ipaddr_for_uri(Hostent);
+        Family when Family =:= inet orelse Family =:= inet6 ->
+            {ok, Hostent} = inet:gethostbyname(?config(rmq_hostname, Config),
+              Family),
+            format_ipaddr_for_uri(Hostent);
+        false ->
+            ?config(rmq_hostname, Config)
+    end,
+    TcpPortName = proplists:get_value(tcp_port_name, Options, tcp_port_amqp),
+    TcpPort = get_node_config(Config, Node, TcpPortName),
+    UserPass = case proplists:get_value(with_user, Options, false) of
+        true ->
+            User = proplists:get_value(user, Options, "guest"),
+            Password = proplists:get_value(password, Options, "guest"),
+            io_lib:format("~s:~s@", [User, Password]);
+        false ->
+            ""
+    end,
+    list_to_binary(
+      rabbit_misc:format("~s://~s~s:~b",
+        [Scheme, UserPass, Hostname, TcpPort])).
+
+format_ipaddr_for_uri(
+  #hostent{h_addrtype = inet, h_addr_list = [IPAddr | _]}) ->
+    {A, B, C, D} = IPAddr,
+    io_lib:format("~b.~b.~b.~b", [A, B, C, D]);
+format_ipaddr_for_uri(
+  #hostent{h_addrtype = inet6, h_addr_list = [IPAddr | _]}) ->
+    {A, B, C, D, E, F, G, H} = IPAddr,
+    Res0 = io_lib:format(
+      "~.16b:~.16b:~.16b:~.16b:~.16b:~.16b:~.16b:~.16b",
+      [A, B, C, D, E, F, G, H]),
+    Res1 = re:replace(Res0, "(^0(:0)+$|^(0:)+|(:0)+$)|:(0:)+", "::"),
+    "[" ++ Res1 ++ "]".
+
+%% Functions to execute code on a remote node/broker.
+
+add_code_path_to_node(Node, Module) ->
+    Path1 = filename:dirname(code:which(Module)),
+    Path2 = filename:dirname(code:which(?MODULE)),
+    Paths = lists:usort([Path1, Path2]),
+    ExistingPaths = rpc:call(Node, code, get_path, []),
+    lists:foreach(
+      fun(P) ->
+          case lists:member(P, ExistingPaths) of
+              true  -> ok;
+              false -> true = rpc:call(Node, code, add_pathz, [P])
+          end
+      end, Paths).
+
+add_code_path_to_all_nodes(Config, Module) ->
+    Nodenames = get_node_configs(Config, nodename),
+    [ok = add_code_path_to_node(Nodename, Module)
+      || Nodename <- Nodenames],
+    ok.
+
+rpc(Config, Node, Module, Function, Args)
+when is_atom(Node) andalso Node =/= undefined ->
+    rpc(Config, Node, Module, Function, Args, infinity);
+rpc(Config, I, Module, Function, Args)
+when is_integer(I) andalso I >= 0 ->
+    Node = get_node_config(Config, I, nodename),
+    rpc(Config, Node, Module, Function, Args);
+rpc(Config, Nodes, Module, Function, Args)
+when is_list(Nodes) ->
+    [rpc(Config, Node, Module, Function, Args) || Node <- Nodes].
+
+%% Remote call with an explicit timeout. {badrpc, ...} results are
+%% re-raised as exits so a failure aborts the calling testcase.
+rpc(_Config, Node, Module, Function, Args, Timeout)
+when is_atom(Node) andalso Node =/= undefined ->
+    %% We add some directories to the broker node search path.
+    add_code_path_to_node(Node, Module),
+    %% If there is an exception, rpc:call/{4,5} returns the exception as
+    %% a "normal" return value. If there is an exit signal, we raise
+    %% it again. In both cases, we have no idea of the module and line
+    %% number which triggered the issue.
+    Ret = case Timeout of
+        infinity -> rpc:call(Node, Module, Function, Args);
+        _        -> rpc:call(Node, Module, Function, Args, Timeout)
+    end,
+    case Ret of
+        {badrpc, {'EXIT', Reason}} -> exit(Reason);
+        {badrpc, Reason}           -> exit(Reason);
+        Ret                        -> Ret
+    end;
+%% Target given as a node index: resolve it and recurse.
+rpc(Config, I, Module, Function, Args, Timeout)
+when is_integer(I) andalso I >= 0 ->
+    Node = get_node_config(Config, I, nodename),
+    rpc(Config, Node, Module, Function, Args, Timeout);
+%% Fan out over a list of nodes/indices; returns the list of results.
+rpc(Config, Nodes, Module, Function, Args, Timeout)
+when is_list(Nodes) ->
+    [rpc(Config, Node, Module, Function, Args, Timeout) || Node <- Nodes].
+
+%% Run Module:Function(Args) on every configured node; returns the
+%% list of per-node results, in nodename-config order.
+rpc_all(Config, Module, Function, Args) ->
+    rpc(Config, get_node_configs(Config, nodename), Module, Function, Args).
+
+%% Same as rpc_all/4 with an explicit per-call timeout.
+rpc_all(Config, Module, Function, Args, Timeout) ->
+    rpc(Config, get_node_configs(Config, nodename), Module, Function, Args,
+      Timeout).
+
+%% Functions to start/restart/stop only the broker or the full Erlang
+%% node.
+
+%% (Re)start the full Erlang node (nodename atom or index); returns ok
+%% or {error, {skip, Reason}} when the node could not be brought up.
+start_node(Config, Node) ->
+    NodeConfig = get_node_config(Config, Node),
+    I = if
+        is_atom(Node) -> nodename_to_index(Config, Node);
+        true          -> Node
+    end,
+    case do_start_rabbitmq_node(Config, NodeConfig, I) of
+        {skip, _} = Error -> {error, Error};
+        _                 -> ok
+    end.
+
+%% Start the RabbitMQ application on an already-running Erlang node.
+start_broker(Config, Node) ->
+    ok = rpc(Config, Node, rabbit, start, []).
+
+%% Stop and restart the RabbitMQ application on the node, without
+%% restarting the Erlang VM (runs do_restart_broker/0 remotely).
+restart_broker(Config, Node) ->
+    ok = rpc(Config, Node, ?MODULE, do_restart_broker, []).
+
+%% Executed on the broker node itself via rpc (see restart_broker/2).
+do_restart_broker() ->
+    ok = rabbit:stop(),
+    ok = rabbit:start().
+
+%% Stop the RabbitMQ application but leave the Erlang node running.
+stop_broker(Config, Node) ->
+    ok = rpc(Config, Node, rabbit, stop, []).
+
+%% Stop then start the whole Erlang node (VM restart, not just rabbit).
+restart_node(Config, Node) ->
+    ok = stop_node(Config, Node),
+    ok = start_node(Config, Node).
+
+%% Stop the full Erlang node; returns ok, or the {skip, _} tuple
+%% produced by stop_rabbitmq_node/2 on failure.
+stop_node(Config, Node) ->
+    NodeConfig = get_node_config(Config, Node),
+    case stop_rabbitmq_node(Config, NodeConfig) of
+        {skip, _} = Error -> Error;
+        _                 -> ok
+    end.
+
+%% Stop the node after sleeping Sleep milliseconds (for timing tests).
+stop_node_after(Config, Node, Sleep) ->
+    timer:sleep(Sleep),
+    stop_node(Config, Node).
+
+%% Kill the node's OS process with SIGKILL and block until the OS pid
+%% is actually gone (the node gets no chance to clean up).
+kill_node(Config, Node) ->
+    Pid = rpc(Config, Node, os, getpid, []),
+    %% FIXME maybe_flush_cover(Cfg),
+    os:cmd("kill -9 " ++ Pid),
+    await_os_pid_death(Pid).
+
+%% Kill the node after sleeping Sleep milliseconds (for timing tests).
+kill_node_after(Config, Node, Sleep) ->
+    timer:sleep(Sleep),
+    kill_node(Config, Node).
+
+%% Poll every 100 ms until the OS process (pid as a string) is dead.
+%% NOTE(review): no upper bound — hangs forever if the pid never dies.
+await_os_pid_death(Pid) ->
+    case rabbit_misc:is_os_process_alive(Pid) of
+        true  -> timer:sleep(100),
+                 await_os_pid_death(Pid);
+        false -> ok
+    end.
+
+%% From a given list of gen_tcp client connections, return the list of
+%% connection handler PID in RabbitMQ.
+%% NOTE(review): matches the client sockets' local addresses against the
+%% peer_host/peer_port of each entry of rabbit_networking:connections();
+%% presumably those entries are the handler pids — confirm against
+%% rabbit_networking.
+get_connection_pids(Connections) ->
+    ConnInfos = [
+      begin
+          {ok, {Addr, Port}} = inet:sockname(Connection),
+          [{peer_host, Addr}, {peer_port, Port}]
+      end || Connection <- Connections],
+    lists:filter(
+      fun(Conn) ->
+          ConnInfo = rabbit_networking:connection_info(Conn,
+            [peer_host, peer_port]),
+          %% On at least Mac OS X, for a connection on localhost, the
+          %% client side of the connection gives its IPv4 address
+          %% (127.0.0.1), but the server side gives some kind of
+          %% non-standard IPv6 address (::ffff:7f00:1, not even the
+          %% standard ::1). So let's test for this alternate form too.
+          AltConnInfo = case proplists:get_value(peer_host, ConnInfo) of
+              {0, 0, 0, 0, 0, 16#ffff, 16#7f00, N} ->
+                  lists:keyreplace(peer_host, 1, ConnInfo,
+                      {peer_host, {127, 0, 0, N}});
+              _ ->
+                  ConnInfo
+          end,
+          lists:member(ConnInfo, ConnInfos) orelse
+          lists:member(AltConnInfo, ConnInfos)
+      end, rabbit_networking:connections()).
+
+%% Return the PID of the given queue's supervisor.
+%% Scans the children of rabbit_amqqueue_sup_sup; returns 'undefined'
+%% if no supervisor owns QueuePid.
+get_queue_sup_pid(QueuePid) ->
+    Sups = supervisor:which_children(rabbit_amqqueue_sup_sup),
+    get_queue_sup_pid(Sups, QueuePid).
+
+%% Helper for get_queue_sup_pid/1: find the supervisor whose workers
+%% include QueuePid.
+get_queue_sup_pid([{_, SupPid, _, _} | Rest], QueuePid) ->
+    WorkerPids = [Pid || {_, Pid, _, _} <- supervisor:which_children(SupPid)],
+    case lists:member(QueuePid, WorkerPids) of
+        true  -> SupPid;
+        false -> get_queue_sup_pid(Rest, QueuePid)
+    end;
+get_queue_sup_pid([], _QueuePid) ->
+    undefined.
+
+%% -------------------------------------------------------------------
+%% Policy helpers.
+%% -------------------------------------------------------------------
+
+%% Set a policy in the default vhost ("/") with priority 0.
+set_policy(Config, Node, Name, Pattern, ApplyTo, Definition) ->
+    ok = rpc(Config, Node,
+      rabbit_policy, set, [<<"/">>, Name, Pattern, Definition, 0, ApplyTo]).
+
+%% Delete a policy from the default vhost ("/").
+clear_policy(Config, Node, Name) ->
+    ok = rpc(Config, Node,
+      rabbit_policy, delete, [<<"/">>, Name]).
+
+%% Set an HA (mirroring) policy with no extra definition entries.
+set_ha_policy(Config, Node, Pattern, Policy) ->
+    set_ha_policy(Config, Node, Pattern, Policy, []).
+
+%% Set an HA policy on queues; Pattern doubles as the policy name.
+set_ha_policy(Config, Node, Pattern, Policy, Extra) ->
+    set_policy(Config, Node, Pattern, Pattern, <<"queues">>,
+      ha_policy(Policy) ++ Extra).
+
+%% Translate a testsuite HA mode spec into policy definition entries:
+%% either <<"all">> or a {Mode, Params} pair.
+ha_policy(<<"all">>) ->
+    [{<<"ha-mode">>, <<"all">>}];
+ha_policy({Mode, Params}) ->
+    [{<<"ha-mode">>, Mode},
+     {<<"ha-params">>, Params}].
+
+%% Mirror every queue on all nodes (policy set via node 0).
+set_ha_policy_all(Config) ->
+    set_ha_policy(Config, 0, <<".*">>, <<"all">>),
+    Config.
+
+%% Install the two HA policies used by mirrored-queue testsuites:
+%% "ha.two.*" mirrored on the first two cluster members, and
+%% "ha.auto.*" additionally with automatic synchronisation.
+set_ha_policy_two_pos(Config) ->
+    Members = [
+      rabbit_misc:atom_to_binary(N)
+      || N <- get_node_configs(Config, nodename)],
+    %% Mirror only on the first two members; the original identity
+    %% comprehension around lists:sublist/2 was a no-op.
+    TwoNodes = lists:sublist(Members, 2),
+    set_ha_policy(Config, 0, <<"^ha.two.">>, {<<"nodes">>, TwoNodes},
+                  [{<<"ha-promote-on-shutdown">>, <<"always">>}]),
+    set_ha_policy(Config, 0, <<"^ha.auto.">>, {<<"nodes">>, TwoNodes},
+                  [{<<"ha-sync-mode">>,           <<"automatic">>},
+                   {<<"ha-promote-on-shutdown">>, <<"always">>}]),
+    Config.
+
+%% Same as set_ha_policy_two_pos/1, but with an explicit
+%% ha-sync-batch-size of 200 on the auto-sync policy.
+set_ha_policy_two_pos_batch_sync(Config) ->
+    Members = [
+      rabbit_misc:atom_to_binary(N)
+      || N <- get_node_configs(Config, nodename)],
+    %% Mirror only on the first two members; the original identity
+    %% comprehension around lists:sublist/2 was a no-op.
+    TwoNodes = lists:sublist(Members, 2),
+    set_ha_policy(Config, 0, <<"^ha.two.">>, {<<"nodes">>, TwoNodes},
+                  [{<<"ha-promote-on-shutdown">>, <<"always">>}]),
+    set_ha_policy(Config, 0, <<"^ha.auto.">>, {<<"nodes">>, TwoNodes},
+                  [{<<"ha-sync-mode">>,           <<"automatic">>},
+                   {<<"ha-sync-batch-size">>,     200},
+                   {<<"ha-promote-on-shutdown">>, <<"always">>}]),
+    Config.
+
+%% -------------------------------------------------------------------
+%% Parameter helpers.
+%% -------------------------------------------------------------------
+
+%% Set a runtime parameter in the default vhost ("/"), with no acting
+%% user (none).
+set_parameter(Config, Node, Component, Name, Value) ->
+    ok = rpc(Config, Node,
+      rabbit_runtime_parameters, set, [<<"/">>, Component, Name, Value, none]).
+
+%% Clear a runtime parameter from the default vhost ("/").
+clear_parameter(Config, Node, Component, Name) ->
+    ok = rpc(Config, Node,
+      rabbit_runtime_parameters, clear, [<<"/">>, Component, Name]).
+
+%% -------------------------------------------------------------------
+%% Plugin helpers.
+%% -------------------------------------------------------------------
+
+%% Enable a plugin on the node via the rabbitmq-plugins machinery.
+enable_plugin(Config, Node, Plugin) ->
+    plugin_action(Config, Node, enable, [Plugin], []).
+
+%% Disable a plugin on the node via the rabbitmq-plugins machinery.
+disable_plugin(Config, Node, Plugin) ->
+    plugin_action(Config, Node, disable, [Plugin], []).
+
+%% Run a rabbit_plugins_main action (enable/disable/...) on the node,
+%% using that node's own plugins file and plugins directory.
+%% NOTE(review): the calls are qualified with rabbit_ct_broker_helpers:
+%% — if this file *is* that module, the qualification is redundant;
+%% confirm against the module header.
+plugin_action(Config, Node, Command, Args, Opts) ->
+    PluginsFile = rabbit_ct_broker_helpers:get_node_config(Config, Node,
+      enabled_plugins_file),
+    PluginsDir = rabbit_ct_broker_helpers:get_node_config(Config, Node,
+      plugins_dir),
+    Nodename = rabbit_ct_broker_helpers:get_node_config(Config, Node,
+      nodename),
+    rabbit_ct_broker_helpers:rpc(Config, Node,
+      rabbit_plugins_main, action,
+      [Command, Nodename, Args, Opts, PluginsFile, PluginsDir]).
+
+%% -------------------------------------------------------------------
+
+%% Start a rabbit_channel wired to a fake writer process and a fresh
+%% limiter, for direct channel-level testing; returns all three pids.
+test_channel() ->
+    Me = self(),
+    Writer = spawn(fun () -> test_writer(Me) end),
+    {ok, Limiter} = rabbit_limiter:start_link(no_id),
+    {ok, Ch} = rabbit_channel:start_link(
+                 1, Me, Writer, Me, "", rabbit_framing_amqp_0_9_1,
+                 user(<<"guest">>), <<"/">>, [], Me, Limiter),
+    {Writer, Limiter, Ch}.
+
+%% Fake writer loop: acks 'flush' gen_server calls and forwards
+%% outgoing methods to Pid; exits on 'shutdown'.
+test_writer(Pid) ->
+    receive
+        {'$gen_call', From, flush} -> gen_server:reply(From, ok),
+                                      test_writer(Pid);
+        {send_command, Method}     -> Pid ! Method,
+                                      test_writer(Pid);
+        shutdown                   -> ok
+    end.
+
+%% Build an administrator #user{} record backed by the internal auth
+%% backend, for use in test channels.
+user(Username) ->
+    #user{username       = Username,
+          tags           = [administrator],
+          authz_backends = [{rabbit_auth_backend_internal, none}]}.
diff --git a/rabbitmq-server/deps/rabbit_common/src/rabbit_ct_helpers.erl b/rabbitmq-server/deps/rabbit_common/src/rabbit_ct_helpers.erl
new file mode 100644 (file)
index 0000000..ece6fe1
--- /dev/null
@@ -0,0 +1,537 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_ct_helpers).
+
+-include_lib("common_test/include/ct.hrl").
+
+-export([
+    log_environment/0,
+    run_steps/2,
+    run_setup_steps/1, run_setup_steps/2,
+    run_teardown_steps/1, run_teardown_steps/2,
+    ensure_application_srcdir/3,
+    start_long_running_testsuite_monitor/1,
+    stop_long_running_testsuite_monitor/1,
+    config_to_testcase_name/2,
+    testcases/1,
+    testcase_number/3,
+    testcase_absname/2, testcase_absname/3,
+    testcase_started/2, testcase_finished/2,
+    exec/1, exec/2,
+    make/3,
+    get_config/2, set_config/2,
+    merge_app_env/2, merge_app_env_in_erlconf/2,
+    cover_work_factor/2
+  ]).
+
+-define(SSL_CERT_PASSWORD, "test").
+
+%% -------------------------------------------------------------------
+%% Testsuite internal helpers.
+%% -------------------------------------------------------------------
+
+%% Dump all environment variables into the CT log, sorted for easier
+%% diffing between runs. The original passed lists:sort/2 a comparison
+%% fun equivalent to the default term order; lists:sort/1 suffices.
+log_environment() ->
+    Vars = lists:sort(os:getenv()),
+    ct:pal(?LOW_IMPORTANCE, "Environment variables:~n~s", [
+        [io_lib:format("  ~s~n", [V]) || V <- Vars]]).
+
+%% Run the standard setup steps with no testsuite-specific extras.
+run_setup_steps(Config) ->
+    run_setup_steps(Config, []).
+
+%% Run the standard setup steps, then ExtraSteps. Each step takes and
+%% returns Config; a {skip, _} result aborts the chain (see run_steps/2).
+run_setup_steps(Config, ExtraSteps) ->
+    Steps = [
+      fun ensure_current_srcdir/1,
+      fun ensure_rabbit_common_srcdir/1,
+      fun ensure_erlang_mk_depsdir/1,
+      fun ensure_rabbit_srcdir/1,
+      fun ensure_make_cmd/1,
+      fun ensure_erl_call_cmd/1,
+      fun ensure_rabbitmqctl_cmd/1,
+      fun ensure_ssl_certs/1,
+      fun start_long_running_testsuite_monitor/1
+    ],
+    run_steps(Config, Steps ++ ExtraSteps).
+
+%% Run the standard teardown steps with no testsuite-specific extras.
+run_teardown_steps(Config) ->
+    run_teardown_steps(Config, []).
+
+%% Run ExtraSteps first, then the standard teardown (monitor stop) —
+%% the mirror of run_setup_steps/2's ordering.
+run_teardown_steps(Config, ExtraSteps) ->
+    Steps = [
+      fun stop_long_running_testsuite_monitor/1
+    ],
+    run_steps(Config, ExtraSteps ++ Steps).
+
+%% Thread Config through each step fun; stop at the first step that
+%% returns {skip, Reason} and propagate it unchanged.
+run_steps(Config, [Step | Rest]) ->
+    case Step(Config) of
+        {skip, _} = Error -> Error;
+        Config1           -> run_steps(Config1, Rest)
+    end;
+run_steps(Config, []) ->
+    Config.
+
+%% Record the testsuite's source directory, from the ct config or the
+%% PWD environment variable; skip if it does not exist.
+ensure_current_srcdir(Config) ->
+    Path = case get_config(Config, current_srcdir) of
+        undefined ->
+            os:getenv("PWD");
+        P ->
+            P
+    end,
+    case filelib:is_dir(Path) of
+        true  -> set_config(Config, {current_srcdir, Path});
+        false -> {skip,
+                  "Current source directory required, " ++
+                  "please set 'current_srcdir' in ct config"}
+    end.
+
+%% Record rabbit_common's source directory; defaults to two levels up
+%% from this module's beam file (ebin/ -> app dir).
+ensure_rabbit_common_srcdir(Config) ->
+    Path = case get_config(Config, rabbit_common_srcdir) of
+        undefined ->
+            filename:dirname(
+              filename:dirname(
+                code:which(?MODULE)));
+        P ->
+            P
+    end,
+    case filelib:is_dir(Path) of
+        true  -> set_config(Config, {rabbit_common_srcdir, Path});
+        false -> {skip,
+                  "rabbit_common source directory required, " ++
+                  "please set 'rabbit_common_srcdir' in ct config"}
+    end.
+
+%% Record the erlang.mk deps directory, looked up in order from the ct
+%% config, the DEPS_DIR environment variable, then common locations
+%% relative to the rabbit_common source directory.
+ensure_erlang_mk_depsdir(Config) ->
+    Path = case get_config(Config, erlang_mk_depsdir) of
+        undefined ->
+            case os:getenv("DEPS_DIR") of
+                false ->
+                    %% Try the common locations.
+                    SrcDir = ?config(rabbit_common_srcdir, Config),
+                    Ds = [
+                      filename:join(SrcDir, "deps"),
+                      filename:join(SrcDir, "../../deps")
+                    ],
+                    case lists:filter(fun filelib:is_dir/1, Ds) of
+                        [P |_] -> P;
+                        []     -> false
+                    end;
+                P ->
+                    P
+            end;
+        P ->
+            P
+    end,
+    case Path =/= false andalso filelib:is_dir(Path) of
+        true  -> set_config(Config, {erlang_mk_depsdir, Path});
+        %% Fixed: the message used to say "DEPSD_DIR" while the code
+        %% actually reads DEPS_DIR.
+        false -> {skip,
+                  "deps directory required, " ++
+                  "please set DEPS_DIR or 'erlang_mk_depsdir' " ++
+                  "in ct config"}
+    end.
+
+%% Record the rabbit application's source directory (config key
+%% 'rabbit_srcdir').
+ensure_rabbit_srcdir(Config) ->
+    ensure_application_srcdir(Config, rabbit, rabbit).
+
+%% Record App's source directory under the '<App>_srcdir' config key.
+%% Looked up from the ct config, the location of one of App's loaded
+%% modules, or "<rabbit_common_srcdir>/<App>" as a fallback.
+ensure_application_srcdir(Config, App, Module) ->
+    AppS = atom_to_list(App),
+    Key = list_to_atom(AppS ++ "_srcdir"),
+    Path = case get_config(Config, Key) of
+        undefined ->
+            case code:which(Module) of
+                non_existing ->
+                    filename:join(?config(rabbit_common_srcdir, Config), AppS);
+                P ->
+                    %% ebin/<mod>.beam -> application directory.
+                    filename:dirname(
+                      filename:dirname(P))
+            end;
+        P ->
+            P
+    end,
+    case filelib:is_dir(Path) of
+        true  -> set_config(Config, {Key, Path});
+        %% Fixed: missing space after the app name used to produce
+        %% messages like "rabbitsource directory required".
+        false -> {skip,
+                  AppS ++ " source directory required, " ++
+                  "please set '" ++ AppS ++ "_srcdir' in ct config"}
+    end.
+
+%% Record a working GNU Make command, from the ct config, the MAKE
+%% environment variable, or plain "make"; verified by matching
+%% "GNU Make" in `make --version` output.
+ensure_make_cmd(Config) ->
+    Make = case get_config(Config, make_cmd) of
+        undefined ->
+            case os:getenv("MAKE") of
+                false -> "make";
+                M     -> M
+            end;
+        M ->
+            M
+    end,
+    Cmd = [Make, "--version"],
+    case exec(Cmd, [{match_stdout, "GNU Make"}]) of
+        {ok, _} -> set_config(Config, {make_cmd, Make});
+        _       -> {skip,
+                    "GNU Make required, " ++
+                    "please set MAKE or 'make_cmd' in ct config"}
+    end.
+
+%% Record a working erl_call executable. The skip message always
+%% promised ERL_CALL / 'erl_call_cmd' overrides, but the original code
+%% never read them; they are now honoured, falling back to the
+%% erl_interface bin directory as before.
+ensure_erl_call_cmd(Config) ->
+    ErlCall = case get_config(Config, erl_call_cmd) of
+        undefined ->
+            case os:getenv("ERL_CALL") of
+                false ->
+                    ErlCallDir = code:lib_dir(erl_interface, bin),
+                    filename:join(ErlCallDir, "erl_call");
+                E ->
+                    E
+            end;
+        E ->
+            E
+    end,
+    Cmd = [ErlCall],
+    case exec(Cmd, [{match_stdout, "Usage: "}]) of
+        {ok, _} -> set_config(Config, {erl_call_cmd, ErlCall});
+        _       -> {skip,
+                    "erl_call required, " ++
+                    "please set ERL_CALL or 'erl_call_cmd' in ct config"}
+    end.
+
+%% Record a working rabbitmqctl script, from the ct config, the
+%% RABBITMQCTL environment variable, or "scripts/rabbitmqctl" under
+%% the rabbit source directory.
+ensure_rabbitmqctl_cmd(Config) ->
+    Rabbitmqctl = case get_config(Config, rabbitmqctl_cmd) of
+        undefined ->
+            case os:getenv("RABBITMQCTL") of
+                false ->
+                    SrcDir = ?config(rabbit_srcdir, Config),
+                    R = filename:join(SrcDir, "scripts/rabbitmqctl"),
+                    case filelib:is_file(R) of
+                        true  -> R;
+                        false -> false
+                    end;
+                R ->
+                    R
+            end;
+        R ->
+            R
+    end,
+    Error = {skip, "rabbitmqctl required, " ++
+             "please set RABBITMQCTL or 'rabbitmqctl_cmd' in ct config"},
+    case Rabbitmqctl of
+        false ->
+            Error;
+        _ ->
+            Cmd = [Rabbitmqctl],
+            %% Running rabbitmqctl without arguments is expected to
+            %% exit with code 64 — presumably EX_USAGE; confirm against
+            %% the script.
+            case exec(Cmd, [drop_stdout]) of
+                {error, 64, _} ->
+                    set_config(Config, {rabbitmqctl_cmd, Rabbitmqctl});
+                _ ->
+                    Error
+            end
+    end.
+
+%% Generate the testsuite SSL certificates (via the tools/tls-certs
+%% makefile) under priv_dir, and merge matching ssl_options into the
+%% broker configuration. Skips the suite if generation fails.
+ensure_ssl_certs(Config) ->
+    SrcDir = ?config(rabbit_common_srcdir, Config),
+    CertsMakeDir = filename:join([SrcDir, "tools", "tls-certs"]),
+    PrivDir = ?config(priv_dir, Config),
+    CertsDir = filename:join(PrivDir, "certs"),
+    CertsPwd = proplists:get_value(rmq_certspwd, Config, ?SSL_CERT_PASSWORD),
+    Cmd = [
+      "PASSWORD=" ++ CertsPwd,
+      "DIR=" ++ CertsDir],
+    case make(Config, CertsMakeDir, Cmd) of
+        {ok, _} ->
+            %% Add SSL certs to the broker configuration.
+            Config1 = merge_app_env(Config,
+              {rabbit, [
+                  {ssl_options, [
+                      {cacertfile, filename:join([CertsDir, "testca", "cacert.pem"])},
+                      {certfile, filename:join([CertsDir, "server", "cert.pem"])},
+                      {keyfile, filename:join([CertsDir, "server", "key.pem"])},
+                      {verify, verify_peer},
+                      {fail_if_no_peer_cert, true}
+                    ]}]}),
+            set_config(Config1, {rmq_certsdir, CertsDir});
+        _ ->
+            {skip, "Failed to create SSL certificates"}
+    end.
+
+%% -------------------------------------------------------------------
+%% Process to log a message every minute during long testcases.
+%% -------------------------------------------------------------------
+
+-define(PING_CT_INTERVAL, 60 * 1000). %% In milliseconds.
+
+%% Spawn the monitor process that pings the CT log periodically with
+%% the list of still-running testcases; its pid is stored in Config.
+start_long_running_testsuite_monitor(Config) ->
+    Pid = spawn(
+      fun() ->
+          {ok, TimerRef} = timer:send_interval(?PING_CT_INTERVAL, ping_ct),
+          long_running_testsuite_monitor(TimerRef, [])
+      end),
+    set_config(Config, {long_running_testsuite_monitor, Pid}).
+
+%% Ask the monitor process (stored in Config) to stop; fire-and-forget.
+stop_long_running_testsuite_monitor(Config) ->
+    ?config(long_running_testsuite_monitor, Config) ! stop,
+    Config.
+
+%% Monitor loop: tracks {Testcase, StartTime} pairs and, on each
+%% ping_ct tick, logs the testcases still in progress with their
+%% elapsed time. Terminates (cancelling the timer) on 'stop'.
+long_running_testsuite_monitor(TimerRef, Testcases) ->
+    receive
+        {started, Testcase} ->
+            Testcases1 = [{Testcase, time_compat:monotonic_time(seconds)}
+                          | Testcases],
+            long_running_testsuite_monitor(TimerRef, Testcases1);
+        {finished, Testcase} ->
+            Testcases1 = proplists:delete(Testcase, Testcases),
+            long_running_testsuite_monitor(TimerRef, Testcases1);
+        ping_ct ->
+            T1 = time_compat:monotonic_time(seconds),
+            ct:pal(?STD_IMPORTANCE, "Testcases still in progress:~s",
+              [[
+                  begin
+                      TDiff = format_time_diff(T1, T0),
+                      rabbit_misc:format("~n - ~s (~s)", [TC, TDiff])
+                  end
+                  || {TC, T0} <- Testcases
+                ]]),
+            long_running_testsuite_monitor(TimerRef, Testcases);
+        stop ->
+            timer:cancel(TimerRef)
+    end.
+
+%% Render the elapsed time between two monotonic timestamps (seconds)
+%% as "H:MM:SS".
+format_time_diff(T1, T0) ->
+    TotalSeconds = T1 - T0,
+    Hours   = TotalSeconds div 3600,
+    Minutes = (TotalSeconds rem 3600) div 60,
+    Seconds = TotalSeconds rem 60,
+    rabbit_misc:format("~b:~2..0b:~2..0b", [Hours, Minutes, Seconds]).
+
+%% Notify the long-running-testsuite monitor that a testcase started.
+testcase_started(Config, Testcase) ->
+    Testcase1 = config_to_testcase_name(Config, Testcase),
+    ?config(long_running_testsuite_monitor, Config) ! {started, Testcase1},
+    Config.
+
+%% Notify the long-running-testsuite monitor that a testcase finished.
+testcase_finished(Config, Testcase) ->
+    Testcase1 = config_to_testcase_name(Config, Testcase),
+    ?config(long_running_testsuite_monitor, Config) ! {finished, Testcase1},
+    Config.
+
+%% Full testcase name including its group path, "/"-separated.
+config_to_testcase_name(Config, Testcase) ->
+    testcase_absname(Config, Testcase).
+
+%% Absolute testcase name with the default "/" separator.
+testcase_absname(Config, Testcase) ->
+    testcase_absname(Config, Testcase, "/").
+
+%% Build "group/.../testcase" from the tc_group_properties and
+%% tc_group_path entries in Config, joined with Sep. An empty Testcase
+%% yields the group path alone.
+testcase_absname(Config, Testcase, Sep) ->
+    Name = rabbit_misc:format("~s", [Testcase]),
+    case get_config(Config, tc_group_properties) of
+        [] ->
+            Name;
+        Props ->
+            Name1 = case Name of
+                "" ->
+                    rabbit_misc:format("~s",
+                      [proplists:get_value(name, Props)]);
+                _ ->
+                    rabbit_misc:format("~s~s~s",
+                      [proplists:get_value(name, Props), Sep, Name])
+            end,
+            testcase_absname1(Name1,
+              get_config(Config, tc_group_path), Sep)
+    end.
+
+%% Prepend each enclosing group's name (innermost first) to Name.
+testcase_absname1(Name, [Props | Rest], Sep) ->
+    Name1 = rabbit_misc:format("~s~s~s",
+      [proplists:get_value(name, Props), Sep, Name]),
+    testcase_absname1(Name1, Rest, Sep);
+testcase_absname1(Name, [], _) ->
+    lists:flatten(Name).
+
+%% Expand Testsuite:all() (groups included) into the flat, ordered
+%% list of absolute testcase names.
+testcases(Testsuite) ->
+    All = Testsuite:all(),
+    testcases1(Testsuite, All, [], []).
+
+%% Recursive expansion of a CT testsuite structure.
+%% A {group, Name} reference is resolved via Testsuite:groups().
+testcases1(Testsuite, [{group, GroupName} | Rest], CurrentPath, Testcases) ->
+    Group = {GroupName, _, _} = lists:keyfind(GroupName, 1, Testsuite:groups()),
+    testcases1(Testsuite, [Group | Rest], CurrentPath, Testcases);
+%% An inline group: descend into its children with the group name
+%% pushed onto the current path.
+testcases1(Testsuite, [{GroupName, _, Children} | Rest],
+  CurrentPath, Testcases) ->
+    Testcases1 = testcases1(Testsuite, Children,
+      [[{name, GroupName}] | CurrentPath], Testcases),
+    testcases1(Testsuite, Rest, CurrentPath, Testcases1);
+%% A leaf testcase: compute its absolute name from the current path.
+testcases1(Testsuite, [Testcase | Rest], CurrentPath, Testcases)
+when is_atom(Testcase) ->
+    {Props, Path} = case CurrentPath of
+        []      -> {[], []};
+        [H | T] -> {H, T}
+    end,
+    Name = config_to_testcase_name([
+        {tc_group_properties, Props},
+        {tc_group_path, Path}
+      ], Testcase),
+    testcases1(Testsuite, Rest, CurrentPath, [Name | Testcases]);
+%% End of the top-level list: restore original order; end of a nested
+%% list: keep the accumulator as-is for the caller to continue.
+testcases1(_, [], [], Testcases) ->
+    lists:reverse(Testcases);
+testcases1(_, [], _, Testcases) ->
+    Testcases.
+
+%% Zero-based position of TestName within TestSuite's expanded
+%% testcase list (returns the list length if not found).
+testcase_number(Config, TestSuite, TestName) ->
+    Testcase = config_to_testcase_name(Config, TestName),
+    Testcases = testcases(TestSuite),
+    testcase_number1(Testcases, Testcase, 0).
+
+%% Walk the list and return the zero-based index of Testcase; when it
+%% is absent, the final accumulator equals the list length.
+testcase_number1([Candidate | Rest], Testcase, Index) ->
+    case Candidate of
+        Testcase -> Index;
+        _        -> testcase_number1(Rest, Testcase, Index + 1)
+    end;
+testcase_number1([], _Testcase, Index) ->
+    Index.
+
+%% -------------------------------------------------------------------
+%% Helpers for helpers.
+%% -------------------------------------------------------------------
+
+%% Run an external command with default options (see exec/2).
+exec(Cmd) ->
+    exec(Cmd, []).
+
+%% Run [Cmd | Args] through an Erlang port and collect its output.
+%% Local options ({match_stdout, RE} and drop_stdout) are handled in
+%% port_receive_loop/3; everything else is passed to open_port/2.
+%% Returns {ok, Stdout} or {error, ExitCodeOrReason, Detail}.
+exec([Cmd | Args], Options) when is_list(Cmd) orelse is_binary(Cmd) ->
+    %% Resolve a bare command name via $PATH; paths containing a
+    %% separator are used as-is.
+    Cmd1 = case (lists:member($/, Cmd) orelse lists:member($\\, Cmd)) of
+        true ->
+            Cmd;
+        false ->
+            case os:find_executable(Cmd) of
+                false -> Cmd;
+                Path  -> Path
+            end
+    end,
+    Args1 = [format_arg(Arg) || Arg <- Args],
+    {LocalOptions, PortOptions} = lists:partition(
+      fun
+          ({match_stdout, _}) -> true;
+          (drop_stdout)       -> true;
+          (_)                 -> false
+      end, Options),
+    PortOptions1 = case lists:member(nouse_stdio, PortOptions) of
+        true  -> PortOptions;
+        false -> [use_stdio, stderr_to_stdout | PortOptions]
+    end,
+    Log = "+ ~s (pid ~p)",
+    %% Normalize any {env, ...} option entries and append them to the
+    %% log message for easier debugging.
+    {PortOptions2, Log1} = case proplists:get_value(env, PortOptions1) of
+        undefined ->
+            {PortOptions1, Log};
+        Env ->
+            Env1 = [
+              begin
+                  Key1 = format_arg(Key),
+                  Value1 = format_arg(Value),
+                  {Key1, Value1}
+              end
+              || {Key, Value} <- Env
+            ],
+            {
+              [{env, Env1} | proplists:delete(env, PortOptions1)],
+              Log ++ "~n~nEnvironment variables:~n" ++
+              string:join(
+                [rabbit_misc:format("  ~s=~s", [K, V]) || {K, V} <- Env1],
+                "~n")
+            }
+    end,
+    ct:pal(?LOW_IMPORTANCE, Log1, [string:join([Cmd1 | Args1], " "), self()]),
+    try
+        Port = erlang:open_port(
+          {spawn_executable, Cmd1}, [
+            {args, Args1},
+            exit_status
+            | PortOptions2]),
+        port_receive_loop(Port, "", LocalOptions)
+    catch
+        error:Reason ->
+            ct:pal(?LOW_IMPORTANCE, "~s: ~s",
+              [Cmd1, file:format_error(Reason)]),
+            {error, Reason, file:format_error(Reason)}
+    end.
+
+%% Coerce a command argument to a string: {Format, Args} pairs are
+%% formatted, atoms and binaries are converted, everything else is
+%% passed through unchanged.
+format_arg({Format, FormatArgs}) ->
+    rabbit_misc:format(Format, FormatArgs);
+format_arg(Arg) when is_atom(Arg) ->
+    atom_to_list(Arg);
+format_arg(Arg) when is_binary(Arg) ->
+    binary_to_list(Arg);
+format_arg(Arg) ->
+    Arg.
+
+%% Accumulate the port's stdout until exit_status arrives, then decide
+%% success: with {match_stdout, RE}, success means the regex matches
+%% (regardless of exit code); otherwise success means exit code 0.
+port_receive_loop(Port, Stdout, Options) ->
+    receive
+        {Port, {exit_status, X}} ->
+            DropStdout = lists:member(drop_stdout, Options) orelse
+              Stdout =:= "",
+            if
+                DropStdout ->
+                    ct:pal(?LOW_IMPORTANCE, "Exit code: ~p (pid ~p)",
+                      [X, self()]);
+                true ->
+                    ct:pal(?LOW_IMPORTANCE, "~s~nExit code: ~p (pid ~p)",
+                      [Stdout, X, self()])
+            end,
+            case proplists:get_value(match_stdout, Options) of
+                undefined ->
+                    case X of
+                        0 -> {ok, Stdout};
+                        _ -> {error, X, Stdout}
+                    end;
+                RE ->
+                    case re:run(Stdout, RE, [{capture, none}]) of
+                        match   -> {ok, Stdout};
+                        nomatch -> {error, X, Stdout}
+                    end
+            end;
+        {Port, {data, Out}} ->
+            port_receive_loop(Port, Stdout ++ Out, Options)
+    end.
+
+%% Run the configured make command in Dir with the given extra Args,
+%% forwarding the V environment variable for verbosity.
+make(Config, Dir, Args) ->
+    Make = ?config(make_cmd, Config),
+    Verbosity = case os:getenv("V") of
+        false -> [];
+        V     -> ["V=" ++ V]
+    end,
+    Cmd = [Make, "-C", Dir] ++ Verbosity ++ Args,
+    exec(Cmd).
+
+%% This is the same as ?config(), except this one doesn't log a warning
+%% if the key is missing. Returns 'undefined' for absent keys.
+get_config(Config, Key) ->
+    proplists:get_value(Key, Config).
+
+%% Store one {Key, ...} tuple — or a list of them — in the Config
+%% proplist, replacing any previous entry with the same key.
+set_config(Config, Tuple) when is_tuple(Tuple) ->
+    lists:keystore(element(1, Tuple), 1, Config, Tuple);
+set_config(Config, Tuples) when is_list(Tuples) ->
+    lists:foldl(fun(T, Acc) -> set_config(Acc, T) end, Config, Tuples).
+
+%% Merge application environment entries into the 'erlang_node_config'
+%% stored in Config (see merge_app_env_in_erlconf/2).
+merge_app_env(Config, Env) ->
+    ErlangConfig = proplists:get_value(erlang_node_config, Config, []),
+    ErlangConfig1 = merge_app_env_in_erlconf(ErlangConfig, Env),
+    set_config(Config, {erlang_node_config, ErlangConfig1}).
+
+%% Merge {App, Env} (or a list of such pairs) into an Erlang node
+%% config proplist; per-key entries within an app override existing
+%% ones, other entries are preserved.
+merge_app_env_in_erlconf(ErlangConfig, {App, Env}) ->
+    AppConfig = proplists:get_value(App, ErlangConfig, []),
+    AppConfig1 = lists:foldl(
+      fun({Key, _} = Tuple, AC) ->
+          lists:keystore(Key, 1, AC, Tuple)
+      end, AppConfig, Env),
+    lists:keystore(App, 1, ErlangConfig, {App, AppConfig1});
+merge_app_env_in_erlconf(ErlangConfig, [Env | Rest]) ->
+    ErlangConfig1 = merge_app_env_in_erlconf(ErlangConfig, Env),
+    merge_app_env_in_erlconf(ErlangConfig1, Rest);
+merge_app_env_in_erlconf(ErlangConfig, []) ->
+    ErlangConfig.
+
+%% -------------------------------------------------------------------
+%% Cover-related functions.
+%% -------------------------------------------------------------------
+
+%% TODO.
+%% Placeholder: meant to scale timeouts when cover is enabled; for now
+%% it returns the given factor unchanged.
+cover_work_factor(_Config, Without) ->
+    Without.
diff --git a/rabbitmq-server/deps/rabbit_common/src/rabbit_error_logger_handler.erl b/rabbitmq-server/deps/rabbit_common/src/rabbit_error_logger_handler.erl
new file mode 100644 (file)
index 0000000..314d0e6
--- /dev/null
@@ -0,0 +1,175 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+-module(rabbit_error_logger_handler).
+
+%% gen_event handler that accumulates net_kernel distribution failure
+%% reports and renders them as human-readable hints. The accumulated
+%% report is retrieved (and cleared) with a `get_connection_report'
+%% call to this handler.
+
+-behaviour(gen_event).
+
+%% API
+-export([start_link/0, add_handler/0]).
+
+%% gen_event callbacks
+-export([init/1, handle_event/2, handle_call/2, 
+         handle_info/2, terminate/2, code_change/3]).
+
+-define(SERVER, ?MODULE).
+
+%% report: list of {Format, Args} entries, most recent first.
+-record(state, {report = []}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Creates an event manager
+%%
+%% @spec start_link() -> {ok, Pid} | {error, Error}
+%% @end
+%%--------------------------------------------------------------------
+start_link() ->
+    gen_event:start_link({local, ?SERVER}).
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Adds an event handler
+%%
+%% @spec add_handler() -> ok | {'EXIT', Reason} | term()
+%% @end
+%%--------------------------------------------------------------------
+add_handler() ->
+    gen_event:add_handler(?SERVER, ?MODULE, []).
+
+%%%===================================================================
+%%% gen_event callbacks
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever a new event handler is added to an event manager,
+%% this function is called to initialize the event handler.
+%%
+%% @spec init(Args) -> {ok, State}
+%% @end
+%%--------------------------------------------------------------------
+init([]) ->
+    {ok, #state{}}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event manager receives an event sent using
+%% gen_event:notify/2 or gen_event:sync_notify/2, this function is
+%% called for each installed event handler to handle the event.
+%%
+%% @spec handle_event(Event, State) ->
+%%                          {ok, State} |
+%%                          {swap_handler, Args1, State1, Mod2, Args2} |
+%%                          remove_handler
+%% @end
+%%--------------------------------------------------------------------
+
+%% Only info reports about net_kernel 'EXIT's are recorded; reasons
+%% that format/1 does not recognise (it returns []) are dropped. All
+%% other events are ignored.
+handle_event({info_report, _Gleader, {_Pid, _Type,
+                                      {net_kernel, {'EXIT', _, Reason}}}},
+             #state{report = Report} = State) ->
+    NewReport = case format(Reason) of
+                    [] -> Report;
+                    Formatted -> [Formatted | Report]
+                end,
+    {ok, State#state{report = NewReport}};
+handle_event(_Event, State) ->
+    {ok, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event manager receives a request sent using
+%% gen_event:call/3,4, this function is called for the specified
+%% event handler to handle the request.
+%%
+%% @spec handle_call(Request, State) ->
+%%                   {ok, Reply, State} |
+%%                   {swap_handler, Reply, Args1, State1, Mod2, Args2} |
+%%                   {remove_handler, Reply}
+%% @end
+%%--------------------------------------------------------------------
+handle_call(get_connection_report, State) ->
+    %% Return accumulated entries oldest-first and reset the buffer.
+    {ok, lists:reverse(State#state.report), State#state{report = []}};
+handle_call(_Request, State) ->
+    Reply = ok,
+    {ok, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% This function is called for each installed event handler when
+%% an event manager receives any other message than an event or a
+%% synchronous request (or a system message).
+%%
+%% @spec handle_info(Info, State) ->
+%%                         {ok, State} |
+%%                         {swap_handler, Args1, State1, Mod2, Args2} |
+%%                         remove_handler
+%% @end
+%%--------------------------------------------------------------------
+handle_info(_Info, State) ->
+    {ok, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event handler is deleted from an event manager, this
+%% function is called. It should be the opposite of Module:init/1 and
+%% do any necessary cleaning up.
+%%
+%% @spec terminate(Reason, State) -> void()
+%% @end
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+    ok.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Convert process state when code is changed
+%%
+%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
+%% @end
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+%% Map a net_kernel 'EXIT' reason to a {Format, Args} hint suitable
+%% for logging. Unknown reasons yield [] and are not reported.
+format({check_dflag_xnc_failed, _What}) ->
+    {"  * Remote node uses an incompatible Erlang version ~n", []};
+format({recv_challenge_failed, no_node, Node}) ->
+    {"  * Hostname mismatch: node ~p believes its host is different. Please ensure that hostnames resolve the same way locally and on ~p~n", [Node, Node]};
+format({recv_challenge_failed, Error}) ->
+    {"  * Distribution failed unexpectedly while waiting for challenge: ~p~n", [Error]};
+format({recv_challenge_ack_failed, bad_cookie}) ->
+    {"  * Authentication failed (rejected by the local node), please check the Erlang cookie~n", []};
+format({recv_challenge_ack_failed, {error, closed}}) ->
+    {"  * Authentication failed (rejected by the remote node), please check the Erlang cookie~n", []};
+format({recv_status_failed, not_allowed}) ->
+    {"  * This node is not on the list of nodes authorised by remote node (see net_kernel:allow/1)~n", []};
+format({recv_status_failed, {error, closed}}) ->
+    {"  * Remote host closed TCP connection before completing authentication. Is the Erlang distribution using TLS?~n", []};
+format(setup_timer_timeout) ->
+    {"  * TCP connection to remote host has timed out. Is the Erlang distribution using TLS?~n", []};
+format(_) ->
+    [].
index a0699733e490e2d0fcf1931d77761efe6e7fc6c3..bd869dd736f6acfa053e268b13b6a0f0af5ff01d 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([event_type/0, event_props/0, event_timestamp/0, event/0]).
 
--type(event_type() :: atom()).
--type(event_props() :: term()).
--type(event_timestamp() :: non_neg_integer()).
+-type event_type() :: atom().
+-type event_props() :: term().
+-type event_timestamp() :: non_neg_integer().
 
--type(event() :: #event { type      :: event_type(),
+-type event() :: #event { type      :: event_type(),
                           props     :: event_props(),
                           reference :: 'none' | reference(),
-                          timestamp :: event_timestamp() }).
-
--type(level() :: 'none' | 'coarse' | 'fine').
-
--type(timer_fun() :: fun (() -> 'ok')).
--type(container() :: tuple()).
--type(pos() :: non_neg_integer()).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(init_stats_timer/2 :: (container(), pos()) -> container()).
--spec(init_disabled_stats_timer/2 :: (container(), pos()) -> container()).
--spec(ensure_stats_timer/3 :: (container(), pos(), term()) -> container()).
--spec(stop_stats_timer/2 :: (container(), pos()) -> container()).
--spec(reset_stats_timer/2 :: (container(), pos()) -> container()).
--spec(stats_level/2 :: (container(), pos()) -> level()).
--spec(if_enabled/3 :: (container(), pos(), timer_fun()) -> 'ok').
--spec(notify/2 :: (event_type(), event_props()) -> 'ok').
--spec(notify/3 :: (event_type(), event_props(), reference() | 'none') -> 'ok').
--spec(notify_if/3 :: (boolean(), event_type(), event_props()) -> 'ok').
--spec(sync_notify/2 :: (event_type(), event_props()) -> 'ok').
--spec(sync_notify/3 :: (event_type(), event_props(),
-                        reference() | 'none') -> 'ok').
-
--endif.
+                          timestamp :: event_timestamp() }.
+
+-type level() :: 'none' | 'coarse' | 'fine'.
+
+-type timer_fun() :: fun (() -> 'ok').
+-type container() :: tuple().
+-type pos() :: non_neg_integer().
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec init_stats_timer(container(), pos()) -> container().
+-spec init_disabled_stats_timer(container(), pos()) -> container().
+-spec ensure_stats_timer(container(), pos(), term()) -> container().
+-spec stop_stats_timer(container(), pos()) -> container().
+-spec reset_stats_timer(container(), pos()) -> container().
+-spec stats_level(container(), pos()) -> level().
+-spec if_enabled(container(), pos(), timer_fun()) -> 'ok'.
+-spec notify(event_type(), event_props()) -> 'ok'.
+-spec notify(event_type(), event_props(), reference() | 'none') -> 'ok'.
+-spec notify_if(boolean(), event_type(), event_props()) -> 'ok'.
+-spec sync_notify(event_type(), event_props()) -> 'ok'.
+-spec sync_notify(event_type(), event_props(), reference() | 'none') -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index bf57b2aaebe80574469cb7a1df24fc7b3548a174..a43991b2755c5736cb9e15ebc459cd2a86c601dc 100644 (file)
@@ -29,8 +29,6 @@
 %% It's possible in the future we might make decorators
 %% able to manipulate messages as they are published.
 
--ifdef(use_specs).
-
 -type(tx() :: 'transaction' | 'none').
 -type(serial() :: pos_integer() | tx()).
 
 %% none:no callbacks, noroute:all callbacks except route, all:all callbacks
 -callback active_for(rabbit_types:exchange()) -> 'none' | 'noroute' | 'all'.
 
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{description, 0}, {serialise_events, 1}, {create, 2}, {delete, 3},
-     {policy_changed, 2}, {add_binding, 3}, {remove_bindings, 3},
-     {route, 2}, {active_for, 1}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
-
 %%----------------------------------------------------------------------------
 
 %% select a subset of active decorators
index 0b7fda61f62437fb6dcf3f4cd67d03cf46ff680c..5f282d1a3f10d01a0d77da975d617b61bd7ed375 100644 (file)
@@ -16,8 +16,6 @@
 
 -module(rabbit_exchange_type).
 
--ifdef(use_specs).
-
 -type(tx() :: 'transaction' | 'none').
 -type(serial() :: pos_integer() | tx()).
 
 -callback assert_args_equivalence(rabbit_types:exchange(),
                                   rabbit_framing:amqp_table()) ->
     'ok' | rabbit_types:connection_exit().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{description, 0}, {serialise_events, 0}, {route, 2},
-     {validate, 1}, {validate_binding, 2}, {policy_changed, 2},
-     {create, 2}, {delete, 3}, {add_binding, 3}, {remove_bindings, 3},
-     {assert_args_equivalence, 2}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
diff --git a/rabbitmq-server/deps/rabbit_common/src/rabbit_health_check.erl b/rabbitmq-server/deps/rabbit_common/src/rabbit_health_check.erl
new file mode 100644 (file)
index 0000000..185b6ff
--- /dev/null
@@ -0,0 +1,85 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+-module(rabbit_health_check).
+
+%% Basic node health checks: verifies that channels and queues can be
+%% listed on the local node and that no resource alarms are in effect.
+%% node/1,2 run the same check on a (possibly remote) node over RPC.
+
+%% External API
+-export([node/1, node/2]).
+
+%% Internal API
+-export([local/0]).
+
+-spec node(node(), timeout()) -> ok | {badrpc, term()} | {error_string, string()}.
+-spec local() -> ok | {error_string, string()}.
+
+%%----------------------------------------------------------------------------
+%% External functions
+%%----------------------------------------------------------------------------
+
+node(Node) ->
+    %% same default as in CLI
+    node(Node, 70000).
+node(Node, Timeout) ->
+    rabbit_misc:rpc_call(Node, rabbit_health_check, local, [], Timeout).
+
+local() ->
+    run_checks([list_channels, list_queues, alarms]).
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+%% Run the given checks in order, stopping at (and returning) the
+%% first failure.
+run_checks([]) ->
+    ok;
+run_checks([C|Cs]) ->
+    case node_health_check(C) of
+        ok ->
+            run_checks(Cs);
+        Error ->
+            Error
+    end.
+
+%% A node is considered healthy for a check when the corresponding
+%% listing call returns a list; any other result is reported as an
+%% {error_string, Msg} tuple.
+node_health_check(list_channels) ->
+    case rabbit_channel:info_local([pid]) of
+        L when is_list(L) ->
+            ok;
+        Other ->
+            ErrorMsg = io_lib:format("list_channels unexpected output: ~p",
+                                     [Other]),
+            {error_string, ErrorMsg}
+    end;
+
+node_health_check(list_queues) ->
+    health_check_queues(rabbit_vhost:list());
+
+node_health_check(alarms) ->
+    case proplists:get_value(alarms, rabbit:status()) of
+        [] ->
+            ok;
+        Alarms ->
+            ErrorMsg = io_lib:format("resource alarm(s) in effect:~p", [Alarms]),
+            {error_string, ErrorMsg}
+    end.
+
+%% Check that queues can be listed in every vhost; stops at the first
+%% vhost whose listing fails.
+health_check_queues([]) ->
+    ok;
+health_check_queues([VHost|RestVHosts]) ->
+    case rabbit_amqqueue:info_local(VHost) of
+        L when is_list(L) ->
+            health_check_queues(RestVHosts);
+        Other ->
+            ErrorMsg = io_lib:format("list_queues unexpected output for vhost ~s: ~p",
+                                     [VHost, Other]),
+            {error_string, ErrorMsg}
+    end.
index fad9de2670df41fe8edb2c8e216ce085de4b0809..c9b366917a13c0735efa6c45a0e355d7631903af 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([heartbeaters/0]).
 
--type(heartbeaters() :: {rabbit_types:maybe(pid()), rabbit_types:maybe(pid())}).
+-type heartbeaters() :: {rabbit_types:maybe(pid()), rabbit_types:maybe(pid())}.
 
--type(heartbeat_callback() :: fun (() -> any())).
+-type heartbeat_callback() :: fun (() -> any()).
 
--spec(start/6 ::
-        (pid(), rabbit_net:socket(),
-         non_neg_integer(), heartbeat_callback(),
-         non_neg_integer(), heartbeat_callback()) -> heartbeaters()).
+-spec start
+        (pid(), rabbit_net:socket(), non_neg_integer(), heartbeat_callback(),
+         non_neg_integer(), heartbeat_callback()) ->
+            heartbeaters().
 
--spec(start/7 ::
+-spec start
         (pid(), rabbit_net:socket(), rabbit_types:proc_name(),
-         non_neg_integer(), heartbeat_callback(),
-         non_neg_integer(), heartbeat_callback()) -> heartbeaters()).
+         non_neg_integer(), heartbeat_callback(), non_neg_integer(),
+         heartbeat_callback()) ->
+            heartbeaters().
 
--spec(start_heartbeat_sender/4 ::
+-spec start_heartbeat_sender
         (rabbit_net:socket(), non_neg_integer(), heartbeat_callback(),
-         rabbit_types:proc_type_and_name()) -> rabbit_types:ok(pid())).
--spec(start_heartbeat_receiver/4 ::
+         rabbit_types:proc_type_and_name()) ->
+            rabbit_types:ok(pid()).
+-spec start_heartbeat_receiver
         (rabbit_net:socket(), non_neg_integer(), heartbeat_callback(),
-         rabbit_types:proc_type_and_name()) -> rabbit_types:ok(pid())).
-
--spec(pause_monitor/1 :: (heartbeaters()) -> 'ok').
--spec(resume_monitor/1 :: (heartbeaters()) -> 'ok').
+         rabbit_types:proc_type_and_name()) ->
+            rabbit_types:ok(pid()).
 
--spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
--spec(system_continue/3 :: (_,_,{_, _}) -> any()).
--spec(system_terminate/4 :: (_,_,_,_) -> none()).
+-spec pause_monitor(heartbeaters()) -> 'ok'.
+-spec resume_monitor(heartbeaters()) -> 'ok'.
 
--endif.
+-spec system_code_change(_,_,_,_) -> {'ok',_}.
+-spec system_continue(_,_,{_, _}) -> any().
+-spec system_terminate(_,_,_,_) -> none().
 
 %%----------------------------------------------------------------------------
 start(SupPid, Sock, SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) ->
@@ -116,7 +115,7 @@ start_heartbeater(TimeoutSec, SupPid, Sock, TimeoutFun, Name, Callback,
       SupPid, {Name,
                {rabbit_heartbeat, Callback,
                 [Sock, TimeoutSec, TimeoutFun, {Name, Identity}]},
-               transient, ?MAX_WAIT, worker, [rabbit_heartbeat]}).
+               transient, ?WORKER_WAIT, worker, [rabbit_heartbeat]}).
 
 heartbeater(Params, Identity) ->
     Deb = sys:debug_options([]),
index 526757624ee37acb300a2910f2c9a786571ccf96..8965c59973b8245749a8cfb0fe132a8433deb7b8 100644 (file)
@@ -17,6 +17,7 @@
 -module(rabbit_misc).
 -include("rabbit.hrl").
 -include("rabbit_framing.hrl").
+-include("rabbit_misc.hrl").
 
 -export([method_record_type/1, polite_pause/0, polite_pause/1]).
 -export([die/1, frame_error/2, amqp_error/4, quit/1,
@@ -44,6 +45,7 @@
 -export([format/2, format_many/1, format_stderr/2]).
 -export([unfold/2, ceil/1, queue_fold/3]).
 -export([sort_field_table/1]).
+-export([atom_to_binary/1]).
 -export([pid_to_string/1, string_to_pid/1,
          pid_change_node/2, node_to_fake_pid/1]).
 -export([version_compare/2, version_compare/3]).
 -export([interval_operation/5]).
 -export([ensure_timer/4, stop_timer/2, send_after/3, cancel_timer/1]).
 -export([get_parent/0]).
--export([store_proc_name/1, store_proc_name/2]).
+-export([store_proc_name/1, store_proc_name/2, get_proc_name/0]).
 -export([moving_average/4]).
 -export([get_env/3]).
 -export([get_channel_operation_timeout/0]).
 -export([random/1]).
+-export([rpc_call/4, rpc_call/5, rpc_call/7]).
+-export([report_default_thread_pool_size/0]).
+-export([get_gc_info/1]).
 
 %% Horrible macro to use in guards
 -define(IS_BENIGN_EXIT(R),
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([resource_name/0, thunk/1, channel_or_connection_exit/0]).
 
--type(ok_or_error() :: rabbit_types:ok_or_error(any())).
--type(thunk(T) :: fun(() -> T)).
--type(resource_name() :: binary()).
--type(channel_or_connection_exit()
-      :: rabbit_types:channel_exit() | rabbit_types:connection_exit()).
--type(digraph_label() :: term()).
--type(graph_vertex_fun() ::
-        fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph_label()}])).
--type(graph_edge_fun() ::
-        fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph:vertex()}])).
--type(tref() :: {'erlang', reference()} | {timer, timer:tref()}).
-
--spec(method_record_type/1 :: (rabbit_framing:amqp_method_record())
-                              -> rabbit_framing:amqp_method_name()).
--spec(polite_pause/0 :: () -> 'done').
--spec(polite_pause/1 :: (non_neg_integer()) -> 'done').
--spec(die/1 ::
-        (rabbit_framing:amqp_exception()) -> channel_or_connection_exit()).
-
--spec(quit/1 :: (integer()) -> no_return()).
-
--spec(frame_error/2 :: (rabbit_framing:amqp_method_name(), binary())
-                       -> rabbit_types:connection_exit()).
--spec(amqp_error/4 ::
+-type ok_or_error() :: rabbit_types:ok_or_error(any()).
+-type thunk(T) :: fun(() -> T).
+-type resource_name() :: binary().
+-type channel_or_connection_exit()
+      :: rabbit_types:channel_exit() | rabbit_types:connection_exit().
+-type digraph_label() :: term().
+-type graph_vertex_fun() ::
+        fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph_label()}]).
+-type graph_edge_fun() ::
+        fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph:vertex()}]).
+-type tref() :: {'erlang', reference()} | {timer, timer:tref()}.
+
+-spec method_record_type(rabbit_framing:amqp_method_record()) ->
+          rabbit_framing:amqp_method_name().
+-spec polite_pause() -> 'done'.
+-spec polite_pause(non_neg_integer()) -> 'done'.
+-spec die(rabbit_framing:amqp_exception()) -> channel_or_connection_exit().
+
+-spec quit(integer()) -> no_return().
+
+-spec frame_error(rabbit_framing:amqp_method_name(), binary()) ->
+          rabbit_types:connection_exit().
+-spec amqp_error
         (rabbit_framing:amqp_exception(), string(), [any()],
-         rabbit_framing:amqp_method_name())
-        -> rabbit_types:amqp_error()).
--spec(protocol_error/3 :: (rabbit_framing:amqp_exception(), string(), [any()])
-                          -> channel_or_connection_exit()).
--spec(protocol_error/4 ::
+         rabbit_framing:amqp_method_name()) ->
+            rabbit_types:amqp_error().
+-spec protocol_error(rabbit_framing:amqp_exception(), string(), [any()]) ->
+          channel_or_connection_exit().
+-spec protocol_error
         (rabbit_framing:amqp_exception(), string(), [any()],
-         rabbit_framing:amqp_method_name()) -> channel_or_connection_exit()).
--spec(protocol_error/1 ::
-        (rabbit_types:amqp_error()) -> channel_or_connection_exit()).
--spec(not_found/1 :: (rabbit_types:r(atom())) -> rabbit_types:channel_exit()).
--spec(absent/2 :: (rabbit_types:amqqueue(), rabbit_amqqueue:absent_reason())
-                  -> rabbit_types:channel_exit()).
--spec(type_class/1 :: (rabbit_framing:amqp_field_type()) -> atom()).
--spec(assert_args_equivalence/4 :: (rabbit_framing:amqp_table(),
-                                    rabbit_framing:amqp_table(),
-                                    rabbit_types:r(any()), [binary()]) ->
-                                        'ok' | rabbit_types:connection_exit()).
--spec(assert_field_equivalence/4 ::
+         rabbit_framing:amqp_method_name()) ->
+            channel_or_connection_exit().
+-spec protocol_error(rabbit_types:amqp_error()) ->
+          channel_or_connection_exit().
+-spec not_found(rabbit_types:r(atom())) -> rabbit_types:channel_exit().
+-spec absent(rabbit_types:amqqueue(), rabbit_amqqueue:absent_reason()) ->
+          rabbit_types:channel_exit().
+-spec type_class(rabbit_framing:amqp_field_type()) -> atom().
+-spec assert_args_equivalence
+        (rabbit_framing:amqp_table(), rabbit_framing:amqp_table(),
+         rabbit_types:r(any()), [binary()]) ->
+            'ok' | rabbit_types:connection_exit().
+-spec assert_field_equivalence
         (any(), any(), rabbit_types:r(any()), atom() | binary()) ->
-                                         'ok' | rabbit_types:connection_exit()).
--spec(equivalence_fail/4 ::
+            'ok' | rabbit_types:connection_exit().
+-spec equivalence_fail
         (any(), any(), rabbit_types:r(any()), atom() | binary()) ->
-                                 rabbit_types:connection_exit()).
--spec(dirty_read/1 ::
-        ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')).
--spec(table_lookup/2 ::
-        (rabbit_framing:amqp_table(), binary())
-        -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}).
--spec(set_table_value/4 ::
-        (rabbit_framing:amqp_table(), binary(),
-         rabbit_framing:amqp_field_type(), rabbit_framing:amqp_value())
-        -> rabbit_framing:amqp_table()).
--spec(r/2 :: (rabbit_types:vhost(), K)
-             -> rabbit_types:r3(rabbit_types:vhost(), K, '_')
-                    when is_subtype(K, atom())).
--spec(r/3 ::
-        (rabbit_types:vhost() | rabbit_types:r(atom()), K, resource_name())
-        -> rabbit_types:r3(rabbit_types:vhost(), K, resource_name())
-               when is_subtype(K, atom())).
--spec(r_arg/4 ::
+            rabbit_types:connection_exit().
+-spec dirty_read({atom(), any()}) ->
+          rabbit_types:ok_or_error2(any(), 'not_found').
+-spec table_lookup(rabbit_framing:amqp_table(), binary()) ->
+          'undefined' | {rabbit_framing:amqp_field_type(), any()}.
+-spec set_table_value
+        (rabbit_framing:amqp_table(), binary(), rabbit_framing:amqp_field_type(),
+         rabbit_framing:amqp_value()) ->
+            rabbit_framing:amqp_table().
+-spec r(rabbit_types:vhost(), K) ->
+          rabbit_types:r3(rabbit_types:vhost(), K, '_')
+          when is_subtype(K, atom()).
+-spec r(rabbit_types:vhost() | rabbit_types:r(atom()), K, resource_name()) ->
+          rabbit_types:r3(rabbit_types:vhost(), K, resource_name())
+          when is_subtype(K, atom()).
+-spec r_arg
         (rabbit_types:vhost() | rabbit_types:r(atom()), K,
          rabbit_framing:amqp_table(), binary()) ->
-                      undefined |
-                      rabbit_types:error(
-                        {invalid_type, rabbit_framing:amqp_field_type()}) |
-                      rabbit_types:r(K) when is_subtype(K, atom())).
--spec(rs/1 :: (rabbit_types:r(atom())) -> string()).
--spec(enable_cover/0 :: () -> ok_or_error()).
--spec(start_cover/1 :: ([{string(), string()} | string()]) -> 'ok').
--spec(report_cover/0 :: () -> 'ok').
--spec(enable_cover/1 :: ([file:filename() | atom()]) -> ok_or_error()).
--spec(report_cover/1 :: ([file:filename() | atom()]) -> 'ok').
--spec(throw_on_error/2 ::
-        (atom(), thunk(rabbit_types:error(any()) | {ok, A} | A)) -> A).
--spec(with_exit_handler/2 :: (thunk(A), thunk(A)) -> A).
--spec(is_abnormal_exit/1 :: (any()) -> boolean()).
--spec(filter_exit_map/2 :: (fun ((A) -> B), [A]) -> [B]).
--spec(with_user/2 :: (rabbit_types:username(), thunk(A)) -> A).
--spec(with_user_and_vhost/3 ::
-        (rabbit_types:username(), rabbit_types:vhost(), thunk(A))
-        -> A).
--spec(execute_mnesia_transaction/1 :: (thunk(A)) -> A).
--spec(execute_mnesia_transaction/2 ::
-        (thunk(A), fun ((A, boolean()) -> B)) -> B).
--spec(execute_mnesia_tx_with_tail/1 ::
-        (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B))).
--spec(ensure_ok/2 :: (ok_or_error(), atom()) -> 'ok').
--spec(tcp_name/3 ::
-        (atom(), inet:ip_address(), rabbit_networking:ip_port())
-        -> atom()).
--spec(format_inet_error/1 :: (atom()) -> string()).
--spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]).
--spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]).
--spec(table_filter/3:: (fun ((A) -> boolean()), fun ((A, boolean()) -> 'ok'),
-                                                    atom()) -> [A]).
--spec(dirty_read_all/1 :: (atom()) -> [any()]).
--spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom())
-                             -> 'ok' | 'aborted').
--spec(dirty_dump_log/1 :: (file:filename()) -> ok_or_error()).
--spec(format/2 :: (string(), [any()]) -> string()).
--spec(format_many/1 :: ([{string(), [any()]}]) -> string()).
--spec(format_stderr/2 :: (string(), [any()]) -> 'ok').
--spec(unfold/2  :: (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}).
--spec(ceil/1 :: (number()) -> integer()).
--spec(queue_fold/3 :: (fun ((any(), B) -> B), B, queue:queue()) -> B).
--spec(sort_field_table/1 ::
-        (rabbit_framing:amqp_table()) -> rabbit_framing:amqp_table()).
--spec(pid_to_string/1 :: (pid()) -> string()).
--spec(string_to_pid/1 :: (string()) -> pid()).
--spec(pid_change_node/2 :: (pid(), node()) -> pid()).
--spec(node_to_fake_pid/1 :: (atom()) -> pid()).
--spec(version_compare/2 :: (string(), string()) -> 'lt' | 'eq' | 'gt').
--spec(version_compare/3 ::
-        (string(), string(), ('lt' | 'lte' | 'eq' | 'gte' | 'gt'))
-        -> boolean()).
--spec(version_minor_equivalent/2 :: (string(), string()) -> boolean()).
--spec(dict_cons/3 :: (any(), any(), dict:dict()) -> dict:dict()).
--spec(orddict_cons/3 :: (any(), any(), orddict:orddict()) -> orddict:orddict()).
--spec(gb_trees_cons/3 :: (any(), any(), gb_trees:tree()) -> gb_trees:tree()).
--spec(gb_trees_fold/3 :: (fun ((any(), any(), A) -> A), A, gb_trees:tree())
- -> A).
--spec(gb_trees_foreach/2 ::
-        (fun ((any(), any()) -> any()), gb_trees:tree()) -> 'ok').
--spec(all_module_attributes/1 ::
-        (atom()) -> [{atom(), atom(), [term()]}]).
--spec(build_acyclic_graph/3 ::
-        (graph_vertex_fun(), graph_edge_fun(), [{atom(), [term()]}])
-        -> rabbit_types:ok_or_error2(digraph:graph(),
-                                     {'vertex', 'duplicate', digraph:vertex()} |
-                                     {'edge', ({bad_vertex, digraph:vertex()} |
-                                               {bad_edge, [digraph:vertex()]}),
-                                      digraph:vertex(), digraph:vertex()})).
--spec(const/1 :: (A) -> thunk(A)).
--spec(ntoa/1 :: (inet:ip_address()) -> string()).
--spec(ntoab/1 :: (inet:ip_address()) -> string()).
--spec(is_process_alive/1 :: (pid()) -> boolean()).
--spec(pget/2 :: (term(), [term()]) -> term()).
--spec(pget/3 :: (term(), [term()], term()) -> term()).
--spec(pget_or_die/2 :: (term(), [term()]) -> term() | no_return()).
--spec(pmerge/3 :: (term(), term(), [term()]) -> [term()]).
--spec(plmerge/2 :: ([term()], [term()]) -> [term()]).
--spec(pset/3 :: (term(), term(), [term()]) -> [term()]).
--spec(format_message_queue/2 :: (any(), priority_queue:q()) -> term()).
--spec(append_rpc_all_nodes/4 :: ([node()], atom(), atom(), [any()]) -> [any()]).
--spec(os_cmd/1 :: (string()) -> string()).
--spec(is_os_process_alive/1 :: (non_neg_integer()) -> boolean()).
--spec(gb_sets_difference/2 :: (gb_sets:set(), gb_sets:set()) -> gb_sets:set()).
--spec(version/0 :: () -> string()).
--spec(otp_release/0 :: () -> string()).
--spec(which_applications/0 :: () -> [{atom(), string(), string()}]).
--spec(sequence_error/1 :: ([({'error', any()} | any())])
-                       -> {'error', any()} | any()).
--spec(json_encode/1 :: (any()) -> {'ok', string()} | {'error', any()}).
--spec(json_decode/1 :: (string()) -> {'ok', any()} | 'error').
--spec(json_to_term/1 :: (any()) -> any()).
--spec(term_to_json/1 :: (any()) -> any()).
--spec(check_expiry/1 :: (integer()) -> rabbit_types:ok_or_error(any())).
--spec(base64url/1 :: (binary()) -> string()).
--spec(interval_operation/5 ::
-        ({atom(), atom(), any()}, float(), non_neg_integer(), non_neg_integer(), non_neg_integer())
-        -> {any(), non_neg_integer()}).
--spec(ensure_timer/4 :: (A, non_neg_integer(), non_neg_integer(), any()) -> A).
--spec(stop_timer/2 :: (A, non_neg_integer()) -> A).
--spec(send_after/3 :: (non_neg_integer(), pid(), any()) -> tref()).
--spec(cancel_timer/1 :: (tref()) -> 'ok').
--spec(get_parent/0 :: () -> pid()).
--spec(store_proc_name/2 :: (atom(), rabbit_types:proc_name()) -> ok).
--spec(store_proc_name/1 :: (rabbit_types:proc_type_and_name()) -> ok).
--spec(moving_average/4 :: (float(), float(), float(), float() | 'undefined')
-                          -> float()).
--spec(get_env/3 :: (atom(), atom(), term())  -> term()).
--spec(get_channel_operation_timeout/0 :: () -> non_neg_integer()).
--spec(random/1 :: (non_neg_integer()) -> non_neg_integer()).
-
--endif.
+            undefined |
+            rabbit_types:error(
+              {invalid_type, rabbit_framing:amqp_field_type()}) |
+            rabbit_types:r(K) when is_subtype(K, atom()).
+-spec rs(rabbit_types:r(atom())) -> string().
+-spec enable_cover() -> ok_or_error().
+-spec start_cover([{string(), string()} | string()]) -> 'ok'.
+-spec report_cover() -> 'ok'.
+-spec enable_cover([file:filename() | atom()]) -> ok_or_error().
+-spec report_cover([file:filename() | atom()]) -> 'ok'.
+-spec throw_on_error
+        (atom(), thunk(rabbit_types:error(any()) | {ok, A} | A)) -> A.
+-spec with_exit_handler(thunk(A), thunk(A)) -> A.
+-spec is_abnormal_exit(any()) -> boolean().
+-spec filter_exit_map(fun ((A) -> B), [A]) -> [B].
+-spec with_user(rabbit_types:username(), thunk(A)) -> A.
+-spec with_user_and_vhost
+        (rabbit_types:username(), rabbit_types:vhost(), thunk(A)) -> A.
+-spec execute_mnesia_transaction(thunk(A)) -> A.
+-spec execute_mnesia_transaction(thunk(A), fun ((A, boolean()) -> B)) -> B.
+-spec execute_mnesia_tx_with_tail
+        (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B)).
+-spec ensure_ok(ok_or_error(), atom()) -> 'ok'.
+-spec tcp_name(atom(), inet:ip_address(), rabbit_networking:ip_port()) ->
+          atom().
+-spec format_inet_error(atom()) -> string().
+-spec upmap(fun ((A) -> B), [A]) -> [B].
+-spec map_in_order(fun ((A) -> B), [A]) -> [B].
+-spec table_filter
+        (fun ((A) -> boolean()), fun ((A, boolean()) -> 'ok'), atom()) -> [A].
+-spec dirty_read_all(atom()) -> [any()].
+-spec dirty_foreach_key(fun ((any()) -> any()), atom()) ->
+          'ok' | 'aborted'.
+-spec dirty_dump_log(file:filename()) -> ok_or_error().
+-spec format(string(), [any()]) -> string().
+-spec format_many([{string(), [any()]}]) -> string().
+-spec format_stderr(string(), [any()]) -> 'ok'.
+-spec unfold (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}.
+-spec ceil(number()) -> integer().
+-spec queue_fold(fun ((any(), B) -> B), B, ?QUEUE_TYPE()) -> B.
+-spec sort_field_table(rabbit_framing:amqp_table()) ->
+          rabbit_framing:amqp_table().
+-spec pid_to_string(pid()) -> string().
+-spec string_to_pid(string()) -> pid().
+-spec pid_change_node(pid(), node()) -> pid().
+-spec node_to_fake_pid(atom()) -> pid().
+-spec version_compare(string(), string()) -> 'lt' | 'eq' | 'gt'.
+-spec version_compare
+        (string(), string(), ('lt' | 'lte' | 'eq' | 'gte' | 'gt')) -> boolean().
+-spec version_minor_equivalent(string(), string()) -> boolean().
+-spec dict_cons(any(), any(), ?DICT_TYPE()) -> ?DICT_TYPE().
+-spec orddict_cons(any(), any(), orddict:orddict()) -> orddict:orddict().
+-spec gb_trees_cons(any(), any(), gb_trees:tree()) -> gb_trees:tree().
+-spec gb_trees_fold(fun ((any(), any(), A) -> A), A, gb_trees:tree()) -> A.
+-spec gb_trees_foreach(fun ((any(), any()) -> any()), gb_trees:tree()) ->
+          'ok'.
+-spec all_module_attributes(atom()) -> [{atom(), atom(), [term()]}].
+-spec build_acyclic_graph
+        (graph_vertex_fun(), graph_edge_fun(), [{atom(), [term()]}]) ->
+            rabbit_types:ok_or_error2(
+              digraph:graph(),
+              {'vertex', 'duplicate', digraph:vertex()} |
+              {'edge',
+                ({bad_vertex, digraph:vertex()} |
+                 {bad_edge, [digraph:vertex()]}),
+                digraph:vertex(), digraph:vertex()}).
+-spec const(A) -> thunk(A).
+-spec ntoa(inet:ip_address()) -> string().
+-spec ntoab(inet:ip_address()) -> string().
+-spec is_process_alive(pid()) -> boolean().
+-spec pget(term(), [term()]) -> term().
+-spec pget(term(), [term()], term()) -> term().
+-spec pget_or_die(term(), [term()]) -> term() | no_return().
+-spec pmerge(term(), term(), [term()]) -> [term()].
+-spec plmerge([term()], [term()]) -> [term()].
+-spec pset(term(), term(), [term()]) -> [term()].
+-spec format_message_queue(any(), priority_queue:q()) -> term().
+-spec append_rpc_all_nodes([node()], atom(), atom(), [any()]) -> [any()].
+-spec os_cmd(string()) -> string().
+-spec is_os_process_alive(non_neg_integer()) -> boolean().
+-spec gb_sets_difference(?GB_SET_TYPE(), ?GB_SET_TYPE()) -> ?GB_SET_TYPE().
+-spec version() -> string().
+-spec otp_release() -> string().
+-spec which_applications() -> [{atom(), string(), string()}].
+-spec sequence_error([({'error', any()} | any())]) ->
+          {'error', any()} | any().
+-spec json_encode(any()) -> {'ok', string()} | {'error', any()}.
+-spec json_decode(string()) -> {'ok', any()} | 'error'.
+-spec json_to_term(any()) -> any().
+-spec term_to_json(any()) -> any().
+-spec check_expiry(integer()) -> rabbit_types:ok_or_error(any()).
+-spec base64url(binary()) -> string().
+-spec interval_operation
+        ({atom(), atom(), any()}, float(), non_neg_integer(), non_neg_integer(),
+         non_neg_integer()) ->
+            {any(), non_neg_integer()}.
+-spec ensure_timer(A, non_neg_integer(), non_neg_integer(), any()) -> A.
+-spec stop_timer(A, non_neg_integer()) -> A.
+-spec send_after(non_neg_integer(), pid(), any()) -> tref().
+-spec cancel_timer(tref()) -> 'ok'.
+-spec get_parent() -> pid().
+-spec store_proc_name(atom(), rabbit_types:proc_name()) -> ok.
+-spec store_proc_name(rabbit_types:proc_type_and_name()) -> ok.
+-spec get_proc_name() -> rabbit_types:proc_name().
+-spec moving_average(float(), float(), float(), float() | 'undefined') ->
+          float().
+-spec get_env(atom(), atom(), term())  -> term().
+-spec get_channel_operation_timeout() -> non_neg_integer().
+-spec random(non_neg_integer()) -> non_neg_integer().
+-spec rpc_call(node(), atom(), atom(), [any()]) -> any().
+-spec rpc_call(node(), atom(), atom(), [any()], number()) -> any().
+-spec rpc_call
+        (node(), atom(), atom(), [any()], reference(), pid(), number()) -> any().
+-spec report_default_thread_pool_size() -> 'ok'.
+-spec get_gc_info(pid()) -> integer().
 
 %%----------------------------------------------------------------------------
 
@@ -689,6 +692,9 @@ queue_fold(Fun, Init, Q) ->
 sort_field_table(Arguments) ->
     lists:keysort(1, Arguments).
 
+atom_to_binary(A) ->
+    list_to_binary(atom_to_list(A)).
+
 %% This provides a string representation of a pid that is the same
 %% regardless of what node we are running on. The representation also
 %% permits easy identification of the pid's node.
@@ -1012,10 +1018,11 @@ otp_release() ->
 
 %% application:which_applications(infinity) is dangerous, since it can
 %% cause deadlocks on shutdown. So we have to use a timeout variant,
-%% but w/o creating spurious timeout errors.
+%% but w/o creating spurious timeout errors. The timeout value is twice
+%% that of gen_server:call/2.
 which_applications() ->
     try
-        application:which_applications()
+        application:which_applications(10000)
     catch
         exit:{timeout, _} -> []
     end.
@@ -1049,8 +1056,9 @@ json_to_term(V) when is_binary(V) orelse is_number(V) orelse V =:= null orelse
                      V =:= true orelse V =:= false ->
     V.
 
-%% This has the flaw that empty lists will never be JSON objects, so use with
-%% care.
+%% You can use the empty_struct value to represent empty JSON objects.
+term_to_json(empty_struct) ->
+    {struct, []};
 term_to_json([{_, _}|_] = L) ->
     {struct, [{K, term_to_json(V)} || {K, V} <- L]};
 term_to_json(L) when is_list(L) ->
@@ -1117,6 +1125,14 @@ cancel_timer({timer, Ref})  -> {ok, cancel} = timer:cancel(Ref),
 store_proc_name(Type, ProcName) -> store_proc_name({Type, ProcName}).
 store_proc_name(TypeProcName)   -> put(process_name, TypeProcName).
 
+get_proc_name() ->
+    case get(process_name) of
+        undefined ->
+            undefined;
+        {_Type, Name} ->
+            {ok, Name}
+    end.
+
 %% application:get_env/3 is only available in R16B01 or later.
 get_env(Application, Key, Def) ->
     case application:get_env(Application, Key) of
@@ -1151,14 +1167,55 @@ moving_average(Time,  HalfLife,  Next, Current) ->
     Next * (1 - Weight) + Current * Weight.
 
 random(N) ->
-    case get(random_seed) of
+    rand_compat:uniform(N).
+
+%% Moved from rabbit/src/rabbit_cli.erl
+%% If the server we are talking to has non-standard net_ticktime, and
+%% our connection lasts a while, we could get disconnected because of
+%% a timeout unless we set our ticktime to be the same. So let's do
+%% that.
+rpc_call(Node, Mod, Fun, Args) ->
+    rpc_call(Node, Mod, Fun, Args, ?RPC_TIMEOUT).
+
+rpc_call(Node, Mod, Fun, Args, Timeout) ->
+    case rpc:call(Node, net_kernel, get_net_ticktime, [], Timeout) of
+        {badrpc, _} = E -> E;
+        Time            -> net_kernel:set_net_ticktime(Time, 0),
+                           rpc:call(Node, Mod, Fun, Args, Timeout)
+    end.
+
+rpc_call(Node, Mod, Fun, Args, Ref, Pid, Timeout) ->
+    rpc_call(Node, Mod, Fun, Args++[Ref, Pid], Timeout).
+
+guess_number_of_cpu_cores() ->
+    case erlang:system_info(logical_processors_available) of
+        unknown -> % Happens on Mac OS X.
+            erlang:system_info(schedulers);
+        N -> N
+    end.
+
+%% Discussion of chosen values is at
+%% https://github.com/rabbitmq/rabbitmq-server/issues/151
+guess_default_thread_pool_size() ->
+    PoolSize = 16 * guess_number_of_cpu_cores(),
+    min(1024, max(64, PoolSize)).
+
+report_default_thread_pool_size() ->
+    io:format("~b", [guess_default_thread_pool_size()]),
+    erlang:halt(0),
+    ok.
+
+get_gc_info(Pid) ->
+    {garbage_collection, GC} = erlang:process_info(Pid, garbage_collection),
+    case proplists:get_value(max_heap_size, GC) of
+        I when is_integer(I) ->
+            GC;
         undefined ->
-            random:seed(erlang:phash2([node()]),
-                        time_compat:monotonic_time(),
-                        time_compat:unique_integer());
-        _ -> ok
-    end,
-    random:uniform(N).
+            GC;
+        Map ->
+            lists:keyreplace(max_heap_size, 1, GC,
+                             {max_heap_size, maps:get(size, Map)})
+    end.
 
 %% -------------------------------------------------------------------------
 %% Begin copypasta from gen_server2.erl
index b230ca880ab6ed44a94f250ca869cdf8802a3290..b33cd4f81dd15ea983e8ec45435918040d2a7f48 100644 (file)
@@ -18,8 +18,6 @@
 
 -include("rabbit_msg_store.hrl").
 
--ifdef(use_specs).
-
 -type(dir() :: any()).
 -type(index_state() :: any()).
 -type(keyvalue() :: any()).
 -callback delete_object(keyvalue(), index_state()) -> 'ok'.
 -callback delete_by_file(fieldvalue(), index_state()) -> 'ok'.
 -callback terminate(index_state()) -> any().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{new,            1},
-     {recover,        1},
-     {lookup,         2},
-     {insert,         2},
-     {update,         2},
-     {update_fields,  3},
-     {delete,         2},
-     {delete_by_file, 2},
-     {terminate,      1}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
index e77f2ab0922fc9e7d5c47aef72b4a596b5bc208d..792eb55fb8120566bcfd3528d539ab1e370427a1 100644 (file)
 
 %%---------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([socket/0]).
 
--type(stat_option() ::
+-type stat_option() ::
         'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' |
-        'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend').
--type(ok_val_or_error(A) :: rabbit_types:ok_or_error2(A, any())).
--type(ok_or_any_error() :: rabbit_types:ok_or_error(any())).
--type(socket() :: port() | ssl:sslsocket()).
--type(opts() :: [{atom(), any()} |
-                 {raw, non_neg_integer(), non_neg_integer(), binary()}]).
--type(host_or_ip() :: binary() | inet:ip_address()).
--spec(is_ssl/1 :: (socket()) -> boolean()).
--spec(ssl_info/1 :: (socket())
-                    -> 'nossl' | ok_val_or_error(
-                                   [{atom(), any()}])).
--spec(controlling_process/2 :: (socket(), pid()) -> ok_or_any_error()).
--spec(getstat/2 ::
-        (socket(), [stat_option()])
-        -> ok_val_or_error([{stat_option(), integer()}])).
--spec(recv/1 :: (socket()) ->
-                     {'data', [char()] | binary()} | 'closed' |
-                     rabbit_types:error(any()) | {'other', any()}).
--spec(sync_recv/2 :: (socket(), integer()) -> rabbit_types:ok(binary()) |
-                                              rabbit_types:error(any())).
--spec(async_recv/3 ::
-        (socket(), integer(), timeout()) -> rabbit_types:ok(any())).
--spec(port_command/2 :: (socket(), iolist()) -> 'true').
--spec(getopts/2 :: (socket(), [atom() | {raw,
-                                         non_neg_integer(),
-                                         non_neg_integer(),
-                                         non_neg_integer() | binary()}])
-                   -> ok_val_or_error(opts())).
--spec(setopts/2 :: (socket(), opts()) -> ok_or_any_error()).
--spec(send/2 :: (socket(), binary() | iolist()) -> ok_or_any_error()).
--spec(close/1 :: (socket()) -> ok_or_any_error()).
--spec(fast_close/1 :: (socket()) -> ok_or_any_error()).
--spec(sockname/1 ::
-        (socket())
-        -> ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()})).
--spec(peername/1 ::
-        (socket())
-        -> ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()})).
--spec(peercert/1 ::
-        (socket())
-        -> 'nossl' | ok_val_or_error(rabbit_ssl:certificate())).
--spec(connection_string/2 ::
-        (socket(), 'inbound' | 'outbound') -> ok_val_or_error(string())).
--spec(socket_ends/2 ::
-        (socket(), 'inbound' | 'outbound')
-        -> ok_val_or_error({host_or_ip(), rabbit_networking:ip_port(),
-                            host_or_ip(), rabbit_networking:ip_port()})).
--spec(is_loopback/1 :: (socket() | inet:ip_address()) -> boolean()).
--spec(accept_ack/2 :: (any(), socket()) -> ok).
-
--endif.
+        'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend'.
+-type ok_val_or_error(A) :: rabbit_types:ok_or_error2(A, any()).
+-type ok_or_any_error() :: rabbit_types:ok_or_error(any()).
+-type socket() :: port() | ssl:sslsocket().
+-type opts() :: [{atom(), any()} |
+                 {raw, non_neg_integer(), non_neg_integer(), binary()}].
+-type host_or_ip() :: binary() | inet:ip_address().
+-spec is_ssl(socket()) -> boolean().
+-spec ssl_info(socket()) -> 'nossl' | ok_val_or_error([{atom(), any()}]).
+-spec controlling_process(socket(), pid()) -> ok_or_any_error().
+-spec getstat(socket(), [stat_option()]) ->
+          ok_val_or_error([{stat_option(), integer()}]).
+-spec recv(socket()) ->
+          {'data', [char()] | binary()} |
+          'closed' |
+          rabbit_types:error(any()) |
+          {'other', any()}.
+-spec sync_recv(socket(), integer()) ->
+          rabbit_types:ok(binary()) |
+          rabbit_types:error(any()).
+-spec async_recv(socket(), integer(), timeout()) ->
+          rabbit_types:ok(any()).
+-spec port_command(socket(), iolist()) -> 'true'.
+-spec getopts
+        (socket(),
+         [atom() |
+          {raw, non_neg_integer(), non_neg_integer(),
+           non_neg_integer() | binary()}]) ->
+            ok_val_or_error(opts()).
+-spec setopts(socket(), opts()) -> ok_or_any_error().
+-spec send(socket(), binary() | iolist()) -> ok_or_any_error().
+-spec close(socket()) -> ok_or_any_error().
+-spec fast_close(socket()) -> ok_or_any_error().
+-spec sockname(socket()) ->
+          ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()}).
+-spec peername(socket()) ->
+          ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()}).
+-spec peercert(socket()) ->
+          'nossl' | ok_val_or_error(rabbit_ssl:certificate()).
+-spec connection_string(socket(), 'inbound' | 'outbound') ->
+          ok_val_or_error(string()).
+-spec socket_ends(socket(), 'inbound' | 'outbound') ->
+          ok_val_or_error({host_or_ip(), rabbit_networking:ip_port(),
+                           host_or_ip(), rabbit_networking:ip_port()}).
+-spec is_loopback(socket() | inet:ip_address()) -> boolean().
+-spec accept_ack(any(), socket()) -> ok.
 
 %%---------------------------------------------------------------------------
 
index 47309cc253c93da56399b4e6c4af331f1a429072..5bf30ff5e569d9638c55a1c34527f6658bcd75c4 100644 (file)
 -include("rabbit.hrl").
 -include_lib("kernel/include/inet.hrl").
 
--define(FIRST_TEST_BIND_PORT, 10000).
+%% IANA-suggested ephemeral port range is 49152 to 65535
+-define(FIRST_TEST_BIND_PORT, 49152).
 
 %% POODLE
 -define(BAD_SSL_PROTOCOL_VERSIONS, [sslv3]).
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([ip_port/0, hostname/0]).
 
--type(hostname() :: inet:hostname()).
--type(ip_port() :: inet:port_number()).
+-type hostname() :: inet:hostname().
+-type ip_port() :: inet:port_number().
 
--type(family() :: atom()).
--type(listener_config() :: ip_port() |
+-type family() :: atom().
+-type listener_config() :: ip_port() |
                            {hostname(), ip_port()} |
-                           {hostname(), ip_port(), family()}).
--type(address() :: {inet:ip_address(), ip_port(), family()}).
--type(name_prefix() :: atom()).
--type(protocol() :: atom()).
--type(label() :: string()).
-
--spec(start_tcp_listener/1 :: (listener_config()) -> 'ok').
--spec(start_ssl_listener/2 ::
-        (listener_config(), rabbit_types:infos()) -> 'ok').
--spec(stop_tcp_listener/1 :: (listener_config()) -> 'ok').
--spec(active_listeners/0 :: () -> [rabbit_types:listener()]).
--spec(node_listeners/1 :: (node()) -> [rabbit_types:listener()]).
--spec(register_connection/1 :: (pid()) -> ok).
--spec(unregister_connection/1 :: (pid()) -> ok).
--spec(connections/0 :: () -> [rabbit_types:connection()]).
--spec(connections_local/0 :: () -> [rabbit_types:connection()]).
--spec(connection_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(connection_info/1 ::
-        (rabbit_types:connection()) -> rabbit_types:infos()).
--spec(connection_info/2 ::
-        (rabbit_types:connection(), rabbit_types:info_keys())
-        -> rabbit_types:infos()).
--spec(connection_info_all/0 :: () -> [rabbit_types:infos()]).
--spec(connection_info_all/1 ::
-        (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
--spec(connection_info_all/3 ::
-        (rabbit_types:info_keys(), reference(), pid()) -> 'ok').
--spec(close_connection/2 :: (pid(), string()) -> 'ok').
--spec(force_connection_event_refresh/1 :: (reference()) -> 'ok').
-
--spec(on_node_down/1 :: (node()) -> 'ok').
--spec(tcp_listener_addresses/1 :: (listener_config()) -> [address()]).
--spec(tcp_listener_spec/9 ::
-        (name_prefix(), address(), [gen_tcp:listen_option()], module(), module(), protocol(), any(),
-         non_neg_integer(), label()) -> supervisor:child_spec()).
--spec(ensure_ssl/0 :: () -> rabbit_types:infos()).
--spec(fix_ssl_options/1 :: (rabbit_types:infos()) -> rabbit_types:infos()).
--spec(poodle_check/1 :: (atom()) -> 'ok' | 'danger').
-
--spec(boot/0 :: () -> 'ok').
--spec(tcp_listener_started/3 ::
-       (_,
+                           {hostname(), ip_port(), family()}.
+-type address() :: {inet:ip_address(), ip_port(), family()}.
+-type name_prefix() :: atom().
+-type protocol() :: atom().
+-type label() :: string().
+
+-spec start_tcp_listener(listener_config(), integer()) -> 'ok'.
+-spec start_ssl_listener
+        (listener_config(), rabbit_types:infos(), integer()) -> 'ok'.
+-spec stop_tcp_listener(listener_config()) -> 'ok'.
+-spec active_listeners() -> [rabbit_types:listener()].
+-spec node_listeners(node()) -> [rabbit_types:listener()].
+-spec register_connection(pid()) -> ok.
+-spec unregister_connection(pid()) -> ok.
+-spec connections() -> [rabbit_types:connection()].
+-spec connections_local() -> [rabbit_types:connection()].
+-spec connection_info_keys() -> rabbit_types:info_keys().
+-spec connection_info(rabbit_types:connection()) -> rabbit_types:infos().
+-spec connection_info(rabbit_types:connection(), rabbit_types:info_keys()) ->
+          rabbit_types:infos().
+-spec connection_info_all() -> [rabbit_types:infos()].
+-spec connection_info_all(rabbit_types:info_keys()) ->
+          [rabbit_types:infos()].
+-spec connection_info_all(rabbit_types:info_keys(), reference(), pid()) ->
+          'ok'.
+-spec close_connection(pid(), string()) -> 'ok'.
+-spec force_connection_event_refresh(reference()) -> 'ok'.
+
+-spec on_node_down(node()) -> 'ok'.
+-spec tcp_listener_addresses(listener_config()) -> [address()].
+-spec tcp_listener_spec
+        (name_prefix(), address(), [gen_tcp:listen_option()], module(), module(),
+         protocol(), any(), non_neg_integer(), label()) ->
+            supervisor:child_spec().
+-spec ensure_ssl() -> rabbit_types:infos().
+-spec fix_ssl_options(rabbit_types:infos()) -> rabbit_types:infos().
+-spec poodle_check(atom()) -> 'ok' | 'danger'.
+
+-spec boot() -> 'ok'.
+-spec tcp_listener_started
+        (_,
          string() |
-        {byte(),byte(),byte(),byte()} |
-        {char(),char(),char(),char(),char(),char(),char(),char()},
-        _) ->
-                                    'ok').
--spec(tcp_listener_stopped/3 ::
-       (_,
+         {byte(),byte(),byte(),byte()} |
+         {char(),char(),char(),char(),char(),char(),char(),char()}, _) ->
+            'ok'.
+-spec tcp_listener_stopped
+        (_,
          string() |
-        {byte(),byte(),byte(),byte()} |
-        {char(),char(),char(),char(),char(),char(),char(),char()},
-        _) ->
-                                    'ok').
-
--endif.
+         {byte(),byte(),byte(),byte()} |
+         {char(),char(),char(),char(),char(),char(),char(),char()},
+         _) ->
+            'ok'.
 
 %%----------------------------------------------------------------------------
 
@@ -187,32 +182,20 @@ fix_ssl_options(Config) ->
 fix_verify_fun(SslOptsConfig) ->
     %% Starting with ssl 4.0.1 in Erlang R14B, the verify_fun function
     %% takes 3 arguments and returns a tuple.
-    {ok, SslAppVer} = application:get_key(ssl, vsn),
-    UseNewVerifyFun = rabbit_misc:version_compare(SslAppVer, "4.0.1", gte),
     case rabbit_misc:pget(verify_fun, SslOptsConfig) of
         {Module, Function, InitialUserState} ->
-            Fun = make_verify_fun(Module, Function, InitialUserState,
-                                  UseNewVerifyFun),
+            Fun = make_verify_fun(Module, Function, InitialUserState),
             rabbit_misc:pset(verify_fun, Fun, SslOptsConfig);
-        {Module, Function} ->
-            Fun = make_verify_fun(Module, Function, none,
-                                  UseNewVerifyFun),
+        {Module, Function} when is_atom(Module) ->
+            Fun = make_verify_fun(Module, Function, none),
             rabbit_misc:pset(verify_fun, Fun, SslOptsConfig);
-        undefined when UseNewVerifyFun ->
+        {Verifyfun, _InitialUserState} when is_function(Verifyfun, 3) ->
             SslOptsConfig;
         undefined ->
-            % unknown_ca errors are silently ignored prior to R14B unless we
-            % supply this verify_fun - remove when at least R14B is required
-            case proplists:get_value(verify, SslOptsConfig, verify_none) of
-                verify_none -> SslOptsConfig;
-                verify_peer -> [{verify_fun, fun([])    -> true;
-                                                ([_|_]) -> false
-                                             end}
-                                | SslOptsConfig]
-            end
+            SslOptsConfig
     end.
 
-make_verify_fun(Module, Function, InitialUserState, UseNewVerifyFun) ->
+make_verify_fun(Module, Function, InitialUserState) ->
     try
         %% Preload the module: it is required to use
         %% erlang:function_exported/3.
@@ -226,7 +209,7 @@ make_verify_fun(Module, Function, InitialUserState, UseNewVerifyFun) ->
     NewForm = erlang:function_exported(Module, Function, 3),
     OldForm = erlang:function_exported(Module, Function, 1),
     case {NewForm, OldForm} of
-        {true, _} when UseNewVerifyFun ->
+        {true, _} ->
             %% This verify_fun is supported by Erlang R14B+ (ssl
             %% 4.0.1 and later).
             Fun = fun(OtpCert, Event, UserState) ->
@@ -234,23 +217,16 @@ make_verify_fun(Module, Function, InitialUserState, UseNewVerifyFun) ->
             end,
             {Fun, InitialUserState};
         {_, true} ->
-            %% This verify_fun is supported by:
-            %%     o  Erlang up-to R13B;
-            %%     o  Erlang R14B+ for undocumented backward
-            %%        compatibility.
+            %% This verify_fun is supported by Erlang R14B+ for
+            %% undocumented backward compatibility.
             %%
             %% InitialUserState is ignored in this case.
-            fun(ErrorList) ->
-                    Module:Function(ErrorList)
+            fun(Args) ->
+                    Module:Function(Args)
             end;
-        {_, false} when not UseNewVerifyFun ->
-            rabbit_log:error("SSL verify_fun: ~s:~s/1 form required "
-              "for Erlang R13B~n", [Module, Function]),
-            throw({error, {invalid_verify_fun, old_form_required}});
         _ ->
-            Arity = case UseNewVerifyFun of true -> 3; _ -> 1 end,
-            rabbit_log:error("SSL verify_fun: no ~s:~s/~b exported~n",
-              [Module, Function, Arity]),
+            rabbit_log:error("SSL verify_fun: no ~s:~s/3 exported~n",
+              [Module, Function]),
             throw({error, {invalid_verify_fun, function_not_exported}})
     end.
 
index b1370db48d0f4fc396cfca546d91f7e8eb7ba38e..70a5355d9fc692749860bb6d9e4a357fb5d18fa0 100644 (file)
 
 -define(EPMD_TIMEOUT, 30000).
 -define(TCP_DIAGNOSTIC_TIMEOUT, 5000).
+-define(ERROR_LOGGER_HANDLER, rabbit_error_logger_handler).
 
 %%----------------------------------------------------------------------------
 %% Specs
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(names/1 :: (string()) -> rabbit_types:ok_or_error2(
-                                 [{string(), integer()}], term())).
--spec(diagnostics/1 :: ([node()]) -> string()).
--spec(make/1 :: ({string(), string()} | string()) -> node()).
--spec(parts/1 :: (node() | string()) -> {string(), string()}).
--spec(cookie_hash/0 :: () -> string()).
--spec(is_running/2 :: (node(), atom()) -> boolean()).
--spec(is_process_running/2 :: (node(), atom()) -> boolean()).
--spec(cluster_name/0 :: () -> binary()).
--spec(set_cluster_name/1 :: (binary()) -> 'ok').
--spec(ensure_epmd/0 :: () -> 'ok').
--spec(all_running/0 :: () -> [node()]).
-
--endif.
+-spec names(string()) ->
+          rabbit_types:ok_or_error2([{string(), integer()}], term()).
+-spec diagnostics([node()]) -> string().
+-spec make({string(), string()} | string()) -> node().
+-spec parts(node() | string()) -> {string(), string()}.
+-spec cookie_hash() -> string().
+-spec is_running(node(), atom()) -> boolean().
+-spec is_process_running(node(), atom()) -> boolean().
+-spec cluster_name() -> binary().
+-spec set_cluster_name(binary()) -> 'ok'.
+-spec ensure_epmd() -> 'ok'.
+-spec all_running() -> [node()].
 
 %%----------------------------------------------------------------------------
 
@@ -62,12 +59,21 @@ names(Hostname) ->
     end.
 
 diagnostics(Nodes) ->
+    verbose_erlang_distribution(true),
     NodeDiags = [{"~nDIAGNOSTICS~n===========~n~n"
                   "attempted to contact: ~p~n", [Nodes]}] ++
         [diagnostics_node(Node) || Node <- Nodes] ++
         current_node_details(),
+    verbose_erlang_distribution(false),
     rabbit_misc:format_many(lists:flatten(NodeDiags)).
 
+verbose_erlang_distribution(true) ->
+    net_kernel:verbose(1),
+    error_logger:add_report_handler(?ERROR_LOGGER_HANDLER);
+verbose_erlang_distribution(false) ->
+    net_kernel:verbose(0),
+    error_logger:delete_report_handler(?ERROR_LOGGER_HANDLER).
+
 current_node_details() ->
     [{"~ncurrent node details:~n- node name: ~w", [node()]},
      case init:get_argument(home) of
@@ -136,11 +142,7 @@ dist_broken_diagnostics(Name, Host, NamePorts) ->
             [{"  * epmd reports node '~s' running on port ~b", [Name, Port]} |
              case diagnose_connect(Host, Port) of
                  ok ->
-                     [{"  * TCP connection succeeded but Erlang distribution "
-                       "failed~n"
-                       "  * suggestion: hostname mismatch?~n"
-                       "  * suggestion: is the cookie set correctly?~n"
-                       "  * suggestion: is the Erlang distribution using TLS?", []}];
+                     connection_succeeded_diagnostics();
                  {error, Reason} ->
                      [{"  * can't establish TCP connection, reason: ~s~n"
                        "  * suggestion: blocked by firewall?",
@@ -148,6 +150,20 @@ dist_broken_diagnostics(Name, Host, NamePorts) ->
              end]
     end.
 
+connection_succeeded_diagnostics() ->
+    case gen_event:call(error_logger, ?ERROR_LOGGER_HANDLER, get_connection_report) of
+        [] ->
+            [{"  * TCP connection succeeded but Erlang distribution "
+              "failed~n"
+              "  * suggestion: hostname mismatch?~n"
+              "  * suggestion: is the cookie set correctly?~n"
+              "  * suggestion: is the Erlang distribution using TLS?", []}];
+        Report ->
+            [{"  * TCP connection succeeded but Erlang distribution "
+              "failed~n", []}]
+                ++ Report
+    end.
+
 diagnose_connect(Host, Port) ->
     case inet:gethostbyname(Host) of
         {ok, #hostent{h_addrtype = Family}} ->
index 54d2535ea5077f90bde8046b3b6dd5be510ae28d..fd2ba7027e6334e219b68ffd03cecbd01f2e8a86 100644 (file)
 -module(rabbit_password_hashing).
 -include("rabbit.hrl").
 
--ifdef(use_specs).
-
 -callback hash(rabbit_types:password()) -> rabbit_types:password_hash().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{hash, 1}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
index bd8906088e1864a506a27f74f09291b6864cb793..110a26c9f95769bc8892dc2011bf7e653482694a 100644 (file)
 
 -module(rabbit_policy_validator).
 
--ifdef(use_specs).
-
 -export_type([validate_results/0]).
 
 -type(validate_results() ::
         'ok' | {error, string(), [term()]} | [validate_results()]).
 
 -callback validate_policy([{binary(), term()}]) -> validate_results().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [
-     {validate_policy, 1}
-    ];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
index f6677cb88848a3bef44b4b35499a33f71cb3d76c..82a891a03ef0fd6952446bc0433887de232f55eb 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/1 :: (rabbit_types:proc_name()) ->
-                           rabbit_types:ok_pid_or_error()).
--spec(register/2 :: (pid(), pid()) -> 'ok').
--spec(delete_all/1 :: (pid()) -> 'ok').
-
--endif.
+-spec start_link(rabbit_types:proc_name()) -> rabbit_types:ok_pid_or_error().
+-spec register(pid(), pid()) -> 'ok'.
+-spec delete_all(pid()) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index aab2812a07d90e4d89f9ea683f15eb672e020201..ee248027276cd0bdb97ce59c7dd1331f4b330128 100644 (file)
@@ -22,8 +22,6 @@
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -callback startup(rabbit_types:amqqueue()) -> 'ok'.
 
 -callback shutdown(rabbit_types:amqqueue()) -> 'ok'.
 -callback consumer_state_changed(
             rabbit_types:amqqueue(), integer(), boolean()) -> 'ok'.
 
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{startup, 1}, {shutdown, 1}, {policy_changed, 2},
-     {active_for, 1}, {consumer_state_changed, 3}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
-
 %%----------------------------------------------------------------------------
 
 select(Modules) ->
index a73a307cf8dd7ab2f5944be7a96f00564a4e37f4..21596ff0a933cf34e2404b0fa4eb903bf54bd1a3 100644 (file)
 
 -module(rabbit_queue_master_locator).
 
--ifdef(use_specs).
-
 -callback description()                -> [proplists:property()].
--callback queue_master_location(pid()) -> {'ok', node()} | {'error', term()}.
-
--else.
-
--export([behaviour_info/1]).
-behaviour_info(callbacks) ->
-    [{description,           0},
-     {queue_master_location, 1}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
+-callback queue_master_location(rabbit_types:amqqueue()) ->
+    {'ok', node()} | {'error', term()}.
index 73513f9a738a3a22e4063ee3549ce3cffae6b91b..e5aeadcd268c970e6a31c5b23a7fa8cf1ef1bf65 100644 (file)
 -record(connection, {
           %% e.g. <<"127.0.0.1:55054 -> 127.0.0.1:5672">>
           name,
+          %% used for logging: same as `name`, but optionally
+          %% augmented with user-supplied name
+          log_name,
           %% server host
           host,
           %% client host
 }).
 
 -define(STATISTICS_KEYS, [pid, recv_oct, recv_cnt, send_oct, send_cnt,
-                          send_pend, state, channels]).
+                          send_pend, state, channels, reductions,
+                          garbage_collection]).
 
 -define(CREATION_EVENT_KEYS,
         [pid, name, port, peer_port, host,
 -define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
 
 -define(AUTH_NOTIFICATION_INFO_KEYS,
-        [host, vhost, name, peer_host, peer_port, protocol, auth_mechanism,
+        [host, name, peer_host, peer_port, protocol, auth_mechanism,
          ssl, ssl_protocol, ssl_cipher, peer_cert_issuer, peer_cert_subject,
          peer_cert_validity]).
 
 
 %%--------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/3 :: (pid(), any(), rabbit_net:socket()) -> rabbit_types:ok(pid())).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (pid()) -> rabbit_types:infos()).
--spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()).
--spec(force_event_refresh/2 :: (pid(), reference()) -> 'ok').
--spec(shutdown/2 :: (pid(), string()) -> 'ok').
--type(resource_alert() :: {WasAlarmSetForNode :: boolean(),
+-spec start_link(pid(), any(), rabbit_net:socket()) -> rabbit_types:ok(pid()).
+-spec info_keys() -> rabbit_types:info_keys().
+-spec info(pid()) -> rabbit_types:infos().
+-spec info(pid(), rabbit_types:info_keys()) -> rabbit_types:infos().
+-spec force_event_refresh(pid(), reference()) -> 'ok'.
+-spec shutdown(pid(), string()) -> 'ok'.
+-type resource_alert() :: {WasAlarmSetForNode :: boolean(),
                            IsThereAnyAlarmsWithSameSourceInTheCluster :: boolean(),
-                           NodeForWhichAlarmWasSetOrCleared :: node()}).
--spec(conserve_resources/3 :: (pid(), atom(), resource_alert()) -> 'ok').
--spec(server_properties/1 :: (rabbit_types:protocol()) ->
-                                  rabbit_framing:amqp_table()).
+                           NodeForWhichAlarmWasSetOrCleared :: node()}.
+-spec conserve_resources(pid(), atom(), resource_alert()) -> 'ok'.
+-spec server_properties(rabbit_types:protocol()) ->
+          rabbit_framing:amqp_table().
 
 %% These specs only exists to add no_return() to keep dialyzer happy
--spec(init/4 :: (pid(), pid(), any(), rabbit_net:socket()) -> no_return()).
--spec(start_connection/4 ::
-        (pid(), pid(), any(), rabbit_net:socket()) -> no_return()).
-
--spec(mainloop/4 :: (_,[binary()], non_neg_integer(), #v1{}) -> any()).
--spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
--spec(system_continue/3 :: (_,_,{[binary()], non_neg_integer(), #v1{}}) ->
-                                any()).
--spec(system_terminate/4 :: (_,_,_,_) -> none()).
+-spec init(pid(), pid(), any(), rabbit_net:socket()) -> no_return().
+-spec start_connection(pid(), pid(), any(), rabbit_net:socket()) ->
+          no_return().
 
--endif.
+-spec mainloop(_,[binary()], non_neg_integer(), #v1{}) -> any().
+-spec system_code_change(_,_,_,_) -> {'ok',_}.
+-spec system_continue(_,_,{[binary()], non_neg_integer(), #v1{}}) -> any().
+-spec system_terminate(_,_,_,_) -> none().
 
 %%--------------------------------------------------------------------------
 
@@ -337,7 +336,7 @@ socket_op(Sock, Fun) ->
 start_connection(Parent, HelperSup, Deb, Sock) ->
     process_flag(trap_exit, true),
     Name = case rabbit_net:connection_string(Sock, inbound) of
-               {ok, Str}         -> Str;
+               {ok, Str}         -> list_to_binary(Str);
                {error, enotconn} -> rabbit_net:fast_close(Sock),
                                     exit(normal);
                {error, Reason}   -> socket_error(Reason),
@@ -349,11 +348,12 @@ start_connection(Parent, HelperSup, Deb, Sock) ->
     erlang:send_after(HandshakeTimeout, self(), handshake_timeout),
     {PeerHost, PeerPort, Host, Port} =
         socket_op(Sock, fun (S) -> rabbit_net:socket_ends(S, inbound) end),
-    ?store_proc_name(list_to_binary(Name)),
+    ?store_proc_name(Name),
     State = #v1{parent              = Parent,
                 sock                = Sock,
                 connection          = #connection{
-                  name               = list_to_binary(Name),
+                  name               = Name,
+                  log_name           = Name,
                   host               = Host,
                   peer_host          = PeerHost,
                   port               = Port,
@@ -387,10 +387,10 @@ start_connection(Parent, HelperSup, Deb, Sock) ->
              [Deb, [], 0, switch_callback(rabbit_event:init_stats_timer(
                                             State, #v1.stats_timer),
                                           handshake, 8)]}),
-        log(info, "closing AMQP connection ~p (~s)~n", [self(), Name])
+        log(info, "closing AMQP connection ~p (~s)~n", [self(), dynamic_connection_name(Name)])
     catch
         Ex ->
-          log_connection_exception(Name, Ex)
+          log_connection_exception(dynamic_connection_name(Name), Ex)
     after
         %% We don't call gen_tcp:close/1 here since it waits for
         %% pending output to be sent, which results in unnecessary
@@ -744,9 +744,9 @@ wait_for_channel_termination(0, TimerRef, State) ->
 wait_for_channel_termination(N, TimerRef,
                              State = #v1{connection_state = CS,
                                          connection = #connection{
-                                                         name  = ConnName,
-                                                         user  = User,
-                                                         vhost = VHost},
+                                                         log_name  = ConnName,
+                                                         user      = User,
+                                                         vhost     = VHost},
                                          sock = Sock}) ->
     receive
         {'DOWN', _MRef, process, ChPid, Reason} ->
@@ -794,9 +794,9 @@ format_hard_error(Reason) ->
 
 log_hard_error(#v1{connection_state = CS,
                    connection = #connection{
-                                   name  = ConnName,
-                                   user  = User,
-                                   vhost = VHost}}, Channel, Reason) ->
+                                   log_name  = ConnName,
+                                   user      = User,
+                                   vhost     = VHost}}, Channel, Reason) ->
     log(error,
         "Error on AMQP connection ~p (~s, vhost: '~s',"
         " user: '~s', state: ~p), channel ~p:~n~s~n",
@@ -812,7 +812,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol},
     respond_and_close(State, Channel, Protocol, Reason, Reason);
 %% authentication failure
 handle_exception(State = #v1{connection = #connection{protocol = Protocol,
-                                                      name = ConnName,
+                                                      log_name = ConnName,
                                                       capabilities = Capabilities},
                              connection_state = starting},
                  Channel, Reason = #amqp_error{name = access_refused,
@@ -831,7 +831,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol,
 %% when loopback-only user tries to connect from a non-local host
 %% when user tries to access a vhost it has no permissions for
 handle_exception(State = #v1{connection = #connection{protocol = Protocol,
-                                                      name = ConnName,
+                                                      log_name = ConnName,
                                                       user = User},
                              connection_state = opening},
                  Channel, Reason = #amqp_error{name = not_allowed,
@@ -848,7 +848,7 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol},
 %% when negotiation fails, e.g. due to channel_max being higher than the
 %% maxiumum allowed limit
 handle_exception(State = #v1{connection = #connection{protocol = Protocol,
-                                                      name = ConnName,
+                                                      log_name = ConnName,
                                                       user = User},
                              connection_state = tuning},
                  Channel, Reason = #amqp_error{name = not_allowed,
@@ -1102,9 +1102,8 @@ refuse_connection(Sock, Exception, {A, B, C, D}) ->
     ok = inet_op(fun () -> rabbit_net:send(Sock, <<"AMQP",A,B,C,D>>) end),
     throw(Exception).
 
--ifdef(use_specs).
--spec(refuse_connection/2 :: (rabbit_net:socket(), any()) -> no_return()).
--endif.
+-spec refuse_connection(rabbit_net:socket(), any()) -> no_return().
+
 refuse_connection(Sock, Exception) ->
     refuse_connection(Sock, Exception, {0, 0, 9, 1}).
 
@@ -1134,7 +1133,7 @@ handle_method0(#'connection.start_ok'{mechanism = Mechanism,
                                       response = Response,
                                       client_properties = ClientProperties},
                State0 = #v1{connection_state = starting,
-                            connection       = Connection,
+                            connection       = Connection0,
                             sock             = Sock}) ->
     AuthMechanism = auth_mechanism_to_module(Mechanism, Sock),
     Capabilities =
@@ -1142,13 +1141,14 @@ handle_method0(#'connection.start_ok'{mechanism = Mechanism,
             {table, Capabilities1} -> Capabilities1;
             _                      -> []
         end,
+    Connection1 = Connection0#connection{
+                    client_properties = ClientProperties,
+                    capabilities      = Capabilities,
+                    auth_mechanism    = {Mechanism, AuthMechanism},
+                    auth_state        = AuthMechanism:init(Sock)},
+    Connection2 = augment_connection_log_name(Connection1),
     State = State0#v1{connection_state = securing,
-                      connection       =
-                          Connection#connection{
-                            client_properties = ClientProperties,
-                            capabilities      = Capabilities,
-                            auth_mechanism    = {Mechanism, AuthMechanism},
-                            auth_state        = AuthMechanism:init(Sock)}},
+                      connection       = Connection2},
     auth_phase(Response, State);
 
 handle_method0(#'connection.secure_ok'{response = Response},
@@ -1253,7 +1253,7 @@ validate_negotiated_integer_value(Field, Min, ClientValue) ->
 
 %% keep dialyzer happy
 -spec fail_negotiation(atom(), 'min' | 'max', integer(), integer()) ->
-                              no_return().
+          no_return().
 fail_negotiation(Field, MinOrMax, ServerValue, ClientValue) ->
     {S1, S2} = case MinOrMax of
                    min -> {lower,  minimum};
@@ -1334,11 +1334,10 @@ auth_phase(Response,
                                                         auth_state = none}}
     end.
 
--ifdef(use_specs).
--spec(auth_fail/5 ::
+-spec auth_fail
         (rabbit_types:username() | none, string(), [any()], binary(), #v1{}) ->
-           no_return()).
--endif.
+            no_return().
+
 auth_fail(Username, Msg, Args, AuthName,
           State = #v1{connection = #connection{protocol     = Protocol,
                                                capabilities = Capabilities}}) ->
@@ -1407,6 +1406,11 @@ i(state, #v1{connection_state = ConnectionState,
         true  -> flow;
         false -> ConnectionState
     end;
+i(garbage_collection, _State) ->
+    rabbit_misc:get_gc_info(self());
+i(reductions, _State) ->
+    {reductions, Reductions} = erlang:process_info(self(), reductions),
+    Reductions;
 i(Item,               #v1{connection = Conn}) -> ic(Item, Conn).
 
 ic(name,              #connection{name        = Name})     -> Name;
@@ -1465,18 +1469,11 @@ emit_stats(State) ->
     Infos = infos(?STATISTICS_KEYS, State),
     rabbit_event:notify(connection_stats, Infos),
     State1 = rabbit_event:reset_stats_timer(State, #v1.stats_timer),
-    %% If we emit an event which looks like we are in flow control, it's not a
-    %% good idea for it to be our last even if we go idle. Keep emitting
-    %% events, either we stay busy or we drop out of flow control.
-    case proplists:get_value(state, Infos) of
-        flow -> ensure_stats_timer(State1);
-        _    -> State1
-    end.
+    ensure_stats_timer(State1).
 
 %% 1.0 stub
--ifdef(use_specs).
--spec(become_1_0/2 :: (non_neg_integer(), #v1{}) -> no_return()).
--endif.
+-spec become_1_0(non_neg_integer(), #v1{}) -> no_return().
+
 become_1_0(Id, State = #v1{sock = Sock}) ->
     case code:is_loaded(rabbit_amqp1_0_reader) of
         false -> refuse_connection(Sock, amqp1_0_plugin_not_enabled);
@@ -1511,3 +1508,23 @@ send_error_on_channel0_and_close(Channel, Protocol, Reason, State) ->
     State1 = close_connection(terminate_channels(State)),
     ok = send_on_channel0(State#v1.sock, CloseMethod, Protocol),
     State1.
+
+augment_connection_log_name(#connection{client_properties = ClientProperties,
+                                        name = Name} = Connection) ->
+    case rabbit_misc:table_lookup(ClientProperties, <<"connection_name">>) of
+        {longstr, UserSpecifiedName} ->
+            LogName = <<Name/binary, " - ", UserSpecifiedName/binary>>,
+            log(info, "Connection ~p (~s) has a client-provided name: ~s~n", [self(), Name, UserSpecifiedName]),
+            ?store_proc_name(LogName),
+            Connection#connection{log_name = LogName};
+        _ ->
+            Connection
+    end.
+
+dynamic_connection_name(Default) ->
+    case rabbit_misc:get_proc_name() of
+        {ok, Name} ->
+            Name;
+        _ ->
+            Default
+    end.
index 4e360687ad3450ad664b1b857f5ddbee2e7c53c3..e287d2f2bb5182c19ce088036c9454ce10e84228 100644 (file)
@@ -16,8 +16,6 @@
 
 -module(rabbit_runtime_parameter).
 
--ifdef(use_specs).
-
 -type(validate_results() ::
         'ok' | {error, string(), [term()]} | [validate_results()]).
 
                    term(), rabbit_types:user()) -> validate_results().
 -callback notify(rabbit_types:vhost(), binary(), binary(), term()) -> 'ok'.
 -callback notify_clear(rabbit_types:vhost(), binary(), binary()) -> 'ok'.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [
-     {validate, 5},
-     {notify, 4},
-     {notify_clear, 3}
-    ];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
similarity index 99%
rename from rabbitmq-server/src/rabbit_types.erl
rename to rabbitmq-server/deps/rabbit_common/src/rabbit_types.erl
index 3dcb63cbb96fd851efc9e7abb1634e901f61aef4..29a3ef92a18a0581b96bbe55b18376b9308c4d03 100644 (file)
@@ -18,8 +18,6 @@
 
 -include("rabbit.hrl").
 
--ifdef(use_specs).
-
 -export_type([maybe/1, info/0, infos/0, info_key/0, info_keys/0,
               message/0, msg_id/0, basic_message/0,
               delivery/0, content/0, decoded_content/0, undecoded_content/0,
 
 -type(proc_name() :: term()).
 -type(proc_type_and_name() :: {atom(), proc_name()}).
-
--endif. % use_specs
index 3153a9642e0af9a20de3768a6a7034db1a531f93..3884f1a1e99cad6f7d54ccb38282d05b327a3ebe 100644 (file)
 
 %%---------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start/6 ::
+-spec start
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          non_neg_integer(), rabbit_types:protocol(), pid(),
-         rabbit_types:proc_name())
-        -> rabbit_types:ok(pid())).
--spec(start_link/6 ::
+         rabbit_types:proc_name()) ->
+            rabbit_types:ok(pid()).
+-spec start_link
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          non_neg_integer(), rabbit_types:protocol(), pid(),
-         rabbit_types:proc_name())
-        -> rabbit_types:ok(pid())).
--spec(start/7 ::
+         rabbit_types:proc_name()) ->
+            rabbit_types:ok(pid()).
+-spec start
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          non_neg_integer(), rabbit_types:protocol(), pid(),
-         rabbit_types:proc_name(), boolean())
-        -> rabbit_types:ok(pid())).
--spec(start_link/7 ::
+         rabbit_types:proc_name(), boolean()) ->
+            rabbit_types:ok(pid()).
+-spec start_link
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          non_neg_integer(), rabbit_types:protocol(), pid(),
-         rabbit_types:proc_name(), boolean())
-        -> rabbit_types:ok(pid())).
-
--spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
--spec(system_continue/3 :: (_,_,#wstate{}) -> any()).
--spec(system_terminate/4 :: (_,_,_,_) -> none()).
-
--spec(send_command/2 ::
-        (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(send_command/3 ::
-        (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
-        -> 'ok').
--spec(send_command_sync/2 ::
-        (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(send_command_sync/3 ::
-        (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
-        -> 'ok').
--spec(send_command_and_notify/4 ::
-        (pid(), pid(), pid(), rabbit_framing:amqp_method_record())
-        -> 'ok').
--spec(send_command_and_notify/5 ::
+         rabbit_types:proc_name(), boolean()) ->
+            rabbit_types:ok(pid()).
+
+-spec system_code_change(_,_,_,_) -> {'ok',_}.
+-spec system_continue(_,_,#wstate{}) -> any().
+-spec system_terminate(_,_,_,_) -> none().
+
+-spec send_command(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command
+        (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) ->
+            'ok'.
+-spec send_command_sync(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_sync
+        (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) ->
+            'ok'.
+-spec send_command_and_notify
+        (pid(), pid(), pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_and_notify
         (pid(), pid(), pid(), rabbit_framing:amqp_method_record(),
-         rabbit_types:content())
-        -> 'ok').
--spec(send_command_flow/2 ::
-        (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(send_command_flow/3 ::
-        (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
-        -> 'ok').
--spec(flush/1 :: (pid()) -> 'ok').
--spec(internal_send_command/4 ::
+         rabbit_types:content()) ->
+            'ok'.
+-spec send_command_flow(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_flow
+        (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) ->
+            'ok'.
+-spec flush(pid()) -> 'ok'.
+-spec internal_send_command
         (rabbit_net:socket(), rabbit_channel:channel_number(),
-         rabbit_framing:amqp_method_record(), rabbit_types:protocol())
-        -> 'ok').
--spec(internal_send_command/6 ::
+         rabbit_framing:amqp_method_record(), rabbit_types:protocol()) ->
+            'ok'.
+-spec internal_send_command
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          rabbit_framing:amqp_method_record(), rabbit_types:content(),
-         non_neg_integer(), rabbit_types:protocol())
-        -> 'ok').
-
--endif.
+         non_neg_integer(), rabbit_types:protocol()) ->
+            'ok'.
 
 %%---------------------------------------------------------------------------
 
diff --git a/rabbitmq-server/deps/rabbit_common/src/rand_compat.erl b/rabbitmq-server/deps/rabbit_common/src/rand_compat.erl
new file mode 100644 (file)
index 0000000..e304fc8
--- /dev/null
@@ -0,0 +1,124 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rand_compat).
+
+%% We don't want warnings about the use of erlang:now/0 in
+%% this module.
+-compile(nowarn_deprecated_function).
+
+%% Declare versioned functions to allow dynamic code loading,
+%% depending on the Erlang version running. See 'code_version.erl' for details
+-erlang_version_support([
+    {18, [
+        {seed, 1, seed_pre_18, seed_post_18},
+        {seed, 2, seed_pre_18, seed_post_18},
+        {uniform, 0, uniform_pre_18, uniform_post_18},
+        {uniform, 1, uniform_pre_18, uniform_post_18},
+        {uniform_s, 1, uniform_s_pre_18, uniform_s_post_18},
+        {uniform_s, 2, uniform_s_pre_18, uniform_s_post_18}
+      ]}
+  ]).
+
+-export([
+    seed/1, seed_pre_18/1, seed_post_18/1,
+    seed/2, seed_pre_18/2, seed_post_18/2,
+    uniform/0, uniform_pre_18/0, uniform_post_18/0,
+    uniform/1, uniform_pre_18/1, uniform_post_18/1,
+    uniform_s/1, uniform_s_pre_18/1, uniform_s_post_18/1,
+    uniform_s/2, uniform_s_pre_18/2, uniform_s_post_18/2
+  ]).
+
+-define(IS_ALG(A), (A =:= exs64 orelse A =:= exsplus orelse A =:= exs1024)).
+
+%% export_seed_s/1 can't be implemented with `random`.
+%% export_seed_s/2. can't be implemented with `random`.
+
+%% normal_s/1 can't be implemented with `random`.
+%% normal_s/2. can't be implemented with `random`.
+
+%% seed/1.
+
+seed(AlgOrExpState) ->
+    code_version:update(?MODULE),
+    ?MODULE:seed(AlgOrExpState).
+
+seed_pre_18(Alg) when ?IS_ALG(Alg) -> random:seed();
+seed_pre_18(ExpState)              -> random:seed(ExpState).
+seed_post_18(AlgOrExpState)        -> rand:seed(AlgOrExpState).
+
+%% seed/2.
+
+seed(Alg, ExpState) ->
+    code_version:update(?MODULE),
+    ?MODULE:seed(Alg, ExpState).
+
+seed_pre_18(_Alg, ExpState) -> random:seed(ExpState).
+seed_post_18(Alg, ExpState) -> rand:seed(Alg, ExpState).
+
+%% seed_s/1 can't be implemented with `random`.
+%% seed_s/2. can't be implemented with `random`.
+
+%% uniform/0.
+
+uniform() ->
+    code_version:update(?MODULE),
+    ?MODULE:uniform().
+
+ensure_random_seed() ->
+    case get(random_seed) of
+        undefined ->
+            random:seed(erlang:phash2([node()]),
+                        time_compat:monotonic_time(),
+                        time_compat:unique_integer());
+        _ -> ok
+    end.
+
+uniform_pre_18()  ->
+    ensure_random_seed(),
+    random:uniform().
+
+uniform_post_18() -> rand:uniform().
+
+%% uniform/1.
+
+uniform(N) ->
+    code_version:update(?MODULE),
+    ?MODULE:uniform(N).
+
+uniform_pre_18(N)  ->
+    ensure_random_seed(),
+    random:uniform(N).
+
+uniform_post_18(N) -> rand:uniform(N).
+
+%% uniform_s/1.
+
+uniform_s(State) ->
+    code_version:update(?MODULE),
+    ?MODULE:uniform_s(State).
+
+uniform_s_pre_18(State)  -> random:uniform_s(State).
+uniform_s_post_18(State) -> rand:uniform_s(State).
+
+%% uniform_s/2.
+
+uniform_s(N, State) ->
+    code_version:update(?MODULE),
+    ?MODULE:uniform_s(N, State).
+
+uniform_s_pre_18(N, State)  -> random:uniform_s(N, State).
+uniform_s_post_18(N, State) -> rand:uniform_s(N, State).
index d9cf3901b2de46ee1b36f3282c1896a68a0ee088..e007667ed933cb2ac0a1be2fd671cad74b8eee2c 100644 (file)
 %% this module.
 -compile(nowarn_deprecated_function).
 
+%% Declare versioned functions to allow dynamic code loading,
+%% depending on the Erlang version running. See 'code_version.erl' for details
+-erlang_version_support(
+   [{18, [{connection_information, 1, connection_information_pre_18,
+           connection_information_post_18},
+          {connection_information, 2, connection_information_pre_18,
+           connection_information_post_18}]}
+   ]).
+
 -export([connection_information/1,
-         connection_information/2]).
+         connection_information_pre_18/1,
+         connection_information_post_18/1,
+         connection_information/2,
+         connection_information_pre_18/2,
+         connection_information_post_18/2]).
 
 connection_information(SslSocket) ->
-    try
-        ssl:connection_information(SslSocket)
-    catch
-        error:undef ->
+    code_version:update(?MODULE),
+    ssl_compat:connection_information(SslSocket).
+
+connection_information_post_18(SslSocket) ->
+    ssl:connection_information(SslSocket).
+
+connection_information_pre_18(SslSocket) ->
+    case ssl:connection_info(SslSocket) of
+        {ok, {ProtocolVersion, CipherSuite}} ->
+            {ok, [{protocol, ProtocolVersion},
+                  {cipher_suite, CipherSuite}]};
+        {error, Reason} ->
+            {error, Reason}
+    end.
+
+connection_information(SslSocket, Items) ->
+    code_version:update(?MODULE),
+    ssl_compat:connection_information(SslSocket, Items).
+
+connection_information_post_18(SslSocket, Items) ->
+    ssl:connection_information(SslSocket, Items).
+
+connection_information_pre_18(SslSocket, Items) ->
+    WantProtocolVersion = lists:member(protocol, Items),
+    WantCipherSuite = lists:member(cipher_suite, Items),
+    if
+        WantProtocolVersion orelse WantCipherSuite ->
             case ssl:connection_info(SslSocket) of
                 {ok, {ProtocolVersion, CipherSuite}} ->
-                    {ok, [{protocol, ProtocolVersion},
-                          {cipher_suite, CipherSuite}]};
+                    filter_information_items(ProtocolVersion,
+                                             CipherSuite,
+                                             Items,
+                                             []);
                 {error, Reason} ->
                     {error, Reason}
-            end
-    end.
-
-connection_information(SslSocket, Items) ->
-    try
-        ssl:connection_information(SslSocket, Items)
-    catch
-        error:undef ->
-            WantProtocolVersion = lists:member(protocol, Items),
-            WantCipherSuite = lists:member(cipher_suite, Items),
-            if
-                WantProtocolVersion orelse WantCipherSuite ->
-                    case ssl:connection_info(SslSocket) of
-                        {ok, {ProtocolVersion, CipherSuite}} ->
-                            filter_information_items(ProtocolVersion,
-                                                     CipherSuite,
-                                                     Items,
-                                                     []);
-                        {error, Reason} ->
-                            {error, Reason}
-                    end;
-                true ->
-                    {ok, []}
-            end
+            end;
+        true ->
+            {ok, []}
     end.
 
 filter_information_items(ProtocolVersion, CipherSuite, [protocol | Rest],
index c8ffbb12ea66fa6b80861ad12dbb3ef28856337e..22b78689ab7bc1fdd253e42b42f707bfcb55d8ac 100644 (file)
         terminate/2, code_change/3]).
 -export([try_again_restart/3]).
 
-%%--------------------------------------------------------------------------
--ifdef(use_specs).
 -export_type([child_spec/0, startchild_ret/0, strategy/0, sup_name/0]).
--endif.
-%%--------------------------------------------------------------------------
 
--ifdef(use_specs).
 -type child()    :: 'undefined' | pid().
 -type child_id() :: term().
 -type mfargs()   :: {M :: module(), F :: atom(), A :: [term()] | undefined}.
 
 -type strategy() :: 'one_for_all' | 'one_for_one'
                   | 'rest_for_one' | 'simple_one_for_one'.
--endif.
 
 %%--------------------------------------------------------------------------
 
--ifdef(use_specs).
 -record(child, {% pid is undefined when child is not running
                pid = undefined :: child() | {restarting,pid()} | [pid()],
                name            :: child_id(),
                child_type      :: worker(),
                modules = []    :: modules()}).
 -type child_rec() :: #child{}.
--else.
--record(child, {
-               pid = undefined,
-               name,
-               mfargs,
-               restart_type,
-               shutdown,
-               child_type,
-               modules = []}).
--endif.
 
 -define(DICT, dict).
 -define(SETS, sets).
 -define(SET, set).
 
--ifdef(use_specs).
+-include("include/old_builtin_types.hrl").
+
 -record(state, {name,
                strategy               :: strategy(),
                children = []          :: [child_rec()],
-               dynamics               :: ?DICT:?DICT() | ?SETS:?SET(),
+               dynamics               :: ?DICT_TYPE() | ?SET_TYPE(),
                intensity              :: non_neg_integer(),
                period                 :: pos_integer(),
                restarts = [],
                module,
                args}).
 -type state() :: #state{}.
--else.
--record(state, {name,
-               strategy,
-               children = [],
-               dynamics,
-               intensity,
-               period,
-               restarts = [],
-               module,
-               args}).
--endif.
 
 -define(is_simple(State), State#state.strategy =:= simple_one_for_one).
 -define(is_permanent(R), ((R =:= permanent) orelse
 -define(is_explicit_restart(R),
         R == {shutdown, restart}).
 
--ifdef(use_specs).
 -callback init(Args :: term()) ->
     {ok, {{RestartStrategy :: strategy(),
            MaxR            :: non_neg_integer(),
            MaxT            :: non_neg_integer()},
            [ChildSpec :: child_spec()]}}
     | ignore.
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{init,1}];
-behaviour_info(_Other) ->
-    undefined.
 
--endif.
 -define(restarting(_Pid_), {restarting,_Pid_}).
 
 %%% ---------------------------------------------------
@@ -188,7 +151,7 @@ behaviour_info(_Other) ->
 %%% Servers/processes should/could also be built using gen_server.erl.
 %%% SupName = {local, atom()} | {global, atom()}.
 %%% ---------------------------------------------------
--ifdef(use_specs).
+
 -type startlink_err() :: {'already_started', pid()}
                          | {'shutdown', term()}
                          | term().
@@ -198,23 +161,21 @@ behaviour_info(_Other) ->
       Module :: module(),
       Args :: term().
 
--endif.
 start_link(Mod, Args) ->
     gen_server:start_link(?MODULE, {self, Mod, Args}, []).
--ifdef(use_specs).
+
 -spec start_link(SupName, Module, Args) -> startlink_ret() when
       SupName :: sup_name(),
       Module :: module(),
       Args :: term().
--endif.
+
 start_link(SupName, Mod, Args) ->
     gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []).
+
 %%% ---------------------------------------------------
 %%% Interface functions.
 %%% ---------------------------------------------------
--ifdef(use_specs).
+
 -type startchild_err() :: 'already_present'
                        | {'already_started', Child :: child()} | term().
 -type startchild_ret() :: {'ok', Child :: child()}
@@ -224,11 +185,10 @@ start_link(SupName, Mod, Args) ->
 -spec start_child(SupRef, ChildSpec) -> startchild_ret() when
       SupRef :: sup_ref(),
       ChildSpec :: child_spec() | (List :: [term()]).
--endif.
+
 start_child(Supervisor, ChildSpec) ->
     call(Supervisor, {start_child, ChildSpec}).
 
--ifdef(use_specs).
 -spec restart_child(SupRef, Id) -> Result when
       SupRef :: sup_ref(),
       Id :: child_id(),
@@ -237,17 +197,16 @@ start_child(Supervisor, ChildSpec) ->
               | {'error', Error},
       Error :: 'running' | 'restarting' | 'not_found' | 'simple_one_for_one' |
               term().
--endif.
+
 restart_child(Supervisor, Name) ->
     call(Supervisor, {restart_child, Name}).
 
--ifdef(use_specs).
 -spec delete_child(SupRef, Id) -> Result when
       SupRef :: sup_ref(),
       Id :: child_id(),
       Result :: 'ok' | {'error', Error},
       Error :: 'running' | 'restarting' | 'not_found' | 'simple_one_for_one'.
--endif.
+
 delete_child(Supervisor, Name) ->
     call(Supervisor, {delete_child, Name}).
 
@@ -257,28 +216,26 @@ delete_child(Supervisor, Name) ->
 %%          Note that the child is *always* terminated in some
 %%          way (maybe killed).
 %%-----------------------------------------------------------------
--ifdef(use_specs).
+
 -spec terminate_child(SupRef, Id) -> Result when
       SupRef :: sup_ref(),
       Id :: pid() | child_id(),
       Result :: 'ok' | {'error', Error},
       Error :: 'not_found' | 'simple_one_for_one'.
--endif.
+
 terminate_child(Supervisor, Name) ->
     call(Supervisor, {terminate_child, Name}).
 
--ifdef(use_specs).
 -spec which_children(SupRef) -> [{Id,Child,Type,Modules}] when
       SupRef :: sup_ref(),
       Id :: child_id() | undefined,
       Child :: child() | 'restarting',
       Type :: worker(),
       Modules :: modules().
--endif.
+
 which_children(Supervisor) ->
     call(Supervisor, which_children).
 
--ifdef(use_specs).
 -spec count_children(SupRef) -> PropListOfCounts when
       SupRef :: sup_ref(),
       PropListOfCounts :: [Count],
@@ -286,15 +243,14 @@ which_children(Supervisor) ->
              | {active, ActiveProcessCount :: non_neg_integer()}
              | {supervisors, ChildSupervisorCount :: non_neg_integer()}
              |{workers, ChildWorkerCount :: non_neg_integer()}.
--endif.
+
 count_children(Supervisor) ->
     call(Supervisor, count_children).
 
--ifdef(use_specs).
 -spec find_child(Supervisor, Name) -> [pid()] when
       Supervisor :: sup_ref(),
       Name :: child_id().
--endif.
+
 find_child(Supervisor, Name) ->
     [Pid || {Name1, Pid, _Type, _Modules} <- which_children(Supervisor),
             Name1 =:= Name].
@@ -302,11 +258,10 @@ find_child(Supervisor, Name) ->
 call(Supervisor, Req) ->
     gen_server:call(Supervisor, Req, infinity).
 
--ifdef(use_specs).
 -spec check_childspecs(ChildSpecs) -> Result when
       ChildSpecs :: [child_spec()],
       Result :: 'ok' | {'error', Error :: term()}.
--endif.
+
 check_childspecs(ChildSpecs) when is_list(ChildSpecs) ->
     case check_startspec(ChildSpecs) of
        {ok, _} -> ok;
@@ -316,12 +271,12 @@ check_childspecs(X) -> {error, {badarg, X}}.
 
 %%%-----------------------------------------------------------------
 %%% Called by timer:apply_after from restart/2
--ifdef(use_specs).
+
 -spec try_again_restart(SupRef, Child, Reason) -> ok when
       SupRef :: sup_ref(),
       Child :: child_id() | pid(),
       Reason :: term().
--endif.
+
 try_again_restart(Supervisor, Child, Reason) ->
     cast(Supervisor, {try_again_restart, Child, Reason}).
 
@@ -329,11 +284,11 @@ cast(Supervisor, Req) ->
     gen_server:cast(Supervisor, Req).
 
 %%% ---------------------------------------------------
-%%% 
+%%%
 %%% Initialize the supervisor.
-%%% 
+%%%
 %%% ---------------------------------------------------
--ifdef(use_specs).
+
 -type init_sup_name() :: sup_name() | 'self'.
 
 -type stop_rsn() :: {'shutdown', term()}
@@ -344,7 +299,7 @@ cast(Supervisor, Req) ->
 
 -spec init({init_sup_name(), module(), [term()]}) ->
         {'ok', state()} | 'ignore' | {'stop', stop_rsn()}.
--endif.
+
 init({SupName, Mod, Args}) ->
     process_flag(trap_exit, true),
     case Mod:init(Args) of
@@ -448,14 +403,14 @@ do_start_child_i(M, F, A) ->
     end.
 
 %%% ---------------------------------------------------
-%%% 
+%%%
 %%% Callback functions.
-%%% 
+%%%
 %%% ---------------------------------------------------
--ifdef(use_specs).
+
 -type call() :: 'which_children' | 'count_children' | {_, _}.  % XXX: refine
 -spec handle_call(call(), term(), state()) -> {'reply', term(), state()}.
--endif.
+
 handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) ->
     Child = hd(State#state.children),
     #child{mfargs = {M, F, A}} = Child,
@@ -638,10 +593,10 @@ count_child(#child{pid = Pid, child_type = supervisor},
 %%% If a restart attempt failed, this message is sent via
 %%% timer:apply_after(0,...) in order to give gen_server the chance to
 %%% check it's inbox before trying again.
--ifdef(use_specs).
+
 -spec handle_cast({try_again_restart, child_id() | pid(), term()}, state()) ->
                         {'noreply', state()} | {stop, shutdown, state()}.
--endif.
+
 handle_cast({try_again_restart,Pid,Reason}, #state{children=[Child]}=State)
   when ?is_simple(State) ->
     RT = Child#child.restart_type,
@@ -667,10 +622,10 @@ handle_cast({try_again_restart,Name,Reason}, State) ->
 %%
 %% Take care of terminated children.
 %%
--ifdef(use_specs).
+
 -spec handle_info(term(), state()) ->
         {'noreply', state()} | {'stop', 'shutdown', state()}.
--endif.
+
 handle_info({'EXIT', Pid, Reason}, State) ->
     case restart_child(Pid, Reason, State) of
        {ok, State1} ->
@@ -698,16 +653,16 @@ handle_info({delayed_restart, {RestartType, Reason, Child}}, State) ->
 %% this is important.
 
 handle_info(Msg, State) ->
-    error_logger:error_msg("Supervisor received unexpected message: ~p~n", 
+    error_logger:error_msg("Supervisor received unexpected message: ~p~n",
                           [Msg]),
     {noreply, State}.
 
 %%
 %% Terminate this server.
 %%
--ifdef(use_specs).
+
 -spec terminate(term(), state()) -> 'ok'.
--endif.
+
 terminate(_Reason, #state{children=[Child]} = State) when ?is_simple(State) ->
     terminate_dynamic_children(Child, dynamics_db(Child#child.restart_type,
                                                   State#state.dynamics),
@@ -724,10 +679,10 @@ terminate(_Reason, State) ->
 %% NOTE: This requires that the init function of the call-back module
 %%       does not have any side effects.
 %%
--ifdef(use_specs).
+
 -spec code_change(term(), state(), term()) ->
         {'ok', state()} | {'error', term()}.
--endif.
+
 code_change(_, State, _) ->
     case (State#state.module):init(State#state.args) of
        {ok, {SupFlags, StartSpec}} ->
@@ -795,7 +750,7 @@ update_chsp(OldCh, Children) ->
        NewC ->
            {ok, NewC}
     end.
-    
+
 %%% ---------------------------------------------------
 %%% Start a new child.
 %%% ---------------------------------------------------
@@ -1049,13 +1004,13 @@ do_terminate(Child, _SupName) ->
     Child#child{pid = undefined}.
 
 %%-----------------------------------------------------------------
-%% Shutdowns a child. We must check the EXIT value 
+%% Shutdowns a child. We must check the EXIT value
 %% of the child, because it might have died with another reason than
-%% the wanted. In that case we want to report the error. We put a 
-%% monitor on the child an check for the 'DOWN' message instead of 
-%% checking for the 'EXIT' message, because if we check the 'EXIT' 
-%% message a "naughty" child, who does unlink(Sup), could hang the 
-%% supervisor. 
+%% the wanted. In that case we want to report the error. We put a
+%% monitor on the child and check for the 'DOWN' message instead of
+%% checking for the 'EXIT' message, because if we check the 'EXIT'
+%% message a "naughty" child, who does unlink(Sup), could hang the
+%% supervisor.
 %% Returns: ok | {error, OtherReason}  (this should be reported)
 %%-----------------------------------------------------------------
 shutdown(Pid, brutal_kill) ->
@@ -1068,14 +1023,14 @@ shutdown(Pid, brutal_kill) ->
                {'DOWN', _MRef, process, Pid, OtherReason} ->
                    {error, OtherReason}
            end;
-       {error, Reason} ->      
+       {error, Reason} ->
            {error, Reason}
     end;
 shutdown(Pid, Time) ->
     case monitor_child(Pid) of
        ok ->
            exit(Pid, shutdown), %% Try to shutdown gracefully
-           receive 
+           receive
                {'DOWN', _MRef, process, Pid, shutdown} ->
                    ok;
                {'DOWN', _MRef, process, Pid, OtherReason} ->
@@ -1087,14 +1042,14 @@ shutdown(Pid, Time) ->
                            {error, OtherReason}
                    end
            end;
-       {error, Reason} ->      
+       {error, Reason} ->
            {error, Reason}
     end.
 
 %% Help function to shutdown/2 switches from link to monitor approach
 monitor_child(Pid) ->
-    
-    %% Do the monitor operation first so that if the child dies 
+
+    %% Do the monitor operation first so that if the child dies
     %% before the monitoring is done causing a 'DOWN'-message with
     %% reason noproc, we will get the real reason in the 'EXIT'-message
     %% unless a naughty child has already done unlink...
@@ -1104,19 +1059,19 @@ monitor_child(Pid) ->
     receive
       %% If the child dies before the unlink we must empty
        %% the mail-box of the 'EXIT'-message and the 'DOWN'-message.
-       {'EXIT', Pid, Reason} -> 
-           receive 
+       {'EXIT', Pid, Reason} ->
+           receive
                {'DOWN', _, process, Pid, _} ->
                    {error, Reason}
            end
-    after 0 -> 
+    after 0 ->
            %% If a naughty child did unlink and the child dies before
-           %% monitor the result will be that shutdown/2 receives a 
+           %% monitor the result will be that shutdown/2 receives a
            %% 'DOWN'-message with reason noproc.
            %% If the child should die after the unlink there
            %% will be a 'DOWN'-message with a correct reason
-           %% that will be handled in shutdown/2. 
-           ok   
+           %% that will be handled in shutdown/2.
+           ok
     end.
 
 
@@ -1443,8 +1398,8 @@ validChildType(What) -> throw({invalid_child_type, What}).
 
 validName(_Name) -> true.
 
-validFunc({M, F, A}) when is_atom(M), 
-                          is_atom(F), 
+validFunc({M, F, A}) when is_atom(M),
+                          is_atom(F),
                           is_list(A) -> true;
 validFunc(Func)                      -> throw({invalid_mfa, Func}).
 
@@ -1462,7 +1417,7 @@ validDelay(Delay) when is_number(Delay),
                        Delay >= 0 -> true;
 validDelay(What)                  -> throw({invalid_delay, What}).
 
-validShutdown(Shutdown, _) 
+validShutdown(Shutdown, _)
   when is_integer(Shutdown), Shutdown > 0 -> true;
 validShutdown(infinity, _)             -> true;
 validShutdown(brutal_kill, _)          -> true;
@@ -1488,7 +1443,7 @@ validMods(Mods) -> throw({invalid_modules, Mods}).
 %%% Returns: {ok, State'} | {terminate, State'}
 %%% ------------------------------------------------------
 
-add_restart(State) ->  
+add_restart(State) ->
     I = State#state.intensity,
     P = State#state.period,
     R = State#state.restarts,
index b87c6cc550009d05858b79c38069965d36d037ce..66044312ba863df4f09d0fad96c548f076d66210 100644 (file)
 %% where it has not yet been deprecated.
 %%
 
+%% Declare versioned functions to allow dynamic code loading,
+%% depending on the Erlang version running. See 'code_version.erl' for details
+-erlang_version_support(
+   [{18,
+     [{monotonic_time, 0, monotonic_time_pre_18, monotonic_time_post_18},
+      {monotonic_time, 1, monotonic_time_pre_18, monotonic_time_post_18},
+      {erlang_system_time, 0, erlang_system_time_pre_18, erlang_system_time_post_18},
+      {erlang_system_time, 1, erlang_system_time_pre_18, erlang_system_time_post_18},
+      {os_system_time, 0, os_system_time_pre_18, os_system_time_post_18},
+      {os_system_time, 1, os_system_time_pre_18, os_system_time_post_18},
+      {time_offset, 0, time_offset_pre_18, time_offset_post_18},
+      {time_offset, 1, time_offset_pre_18, time_offset_post_18},
+      {convert_time_unit, 3, convert_time_unit_pre_18, convert_time_unit_post_18},
+      {timestamp, 0, timestamp_pre_18, timestamp_post_18},
+      {unique_integer, 0, unique_integer_pre_18, unique_integer_post_18},
+      {unique_integer, 1, unique_integer_pre_18, unique_integer_post_18}]}
+   ]).
+
 -export([monotonic_time/0,
-        monotonic_time/1,
-        erlang_system_time/0,
-        erlang_system_time/1,
-        os_system_time/0,
-        os_system_time/1,
-        time_offset/0,
-        time_offset/1,
-        convert_time_unit/3,
-        timestamp/0,
-        unique_integer/0,
-        unique_integer/1,
-        monitor/2,
-        system_info/1,
-        system_flag/2]).
+         monotonic_time_pre_18/0,
+         monotonic_time_post_18/0,
+         monotonic_time/1,
+         monotonic_time_pre_18/1,
+         monotonic_time_post_18/1,
+         erlang_system_time/0,
+         erlang_system_time_pre_18/0,
+         erlang_system_time_post_18/0,
+         erlang_system_time/1,
+         erlang_system_time_pre_18/1,
+         erlang_system_time_post_18/1,
+         os_system_time/0,
+         os_system_time_pre_18/0,
+         os_system_time_post_18/0,
+         os_system_time/1,
+         os_system_time_pre_18/1,
+         os_system_time_post_18/1,
+         time_offset/0,
+         time_offset_pre_18/0,
+         time_offset_post_18/0,
+         time_offset/1,
+         time_offset_pre_18/1,
+         time_offset_post_18/1,
+         convert_time_unit/3,
+         convert_time_unit_pre_18/3,
+         convert_time_unit_post_18/3,
+         timestamp/0,
+         timestamp_pre_18/0,
+         timestamp_post_18/0,
+         unique_integer/0,
+         unique_integer_pre_18/0,
+         unique_integer_post_18/0,
+         unique_integer/1,
+         unique_integer_pre_18/1,
+         unique_integer_post_18/1,
+         monitor/2,
+         system_info/1,
+         system_flag/2]).
 
 monotonic_time() ->
-    try
-       erlang:monotonic_time()
-    catch
-       error:undef ->
-           %% Use Erlang system time as monotonic time
-           erlang_system_time_fallback()
-    end.
+    code_version:update(?MODULE),
+    time_compat:monotonic_time().
+
+monotonic_time_post_18() ->
+       erlang:monotonic_time().
+
+monotonic_time_pre_18() ->
+    erlang_system_time_fallback().
 
 monotonic_time(Unit) ->
-    try
-       erlang:monotonic_time(Unit)
-    catch
-       error:badarg ->
-           erlang:error(badarg, [Unit]);
-       error:undef ->
-           %% Use Erlang system time as monotonic time
-           STime = erlang_system_time_fallback(),
-           try
-               convert_time_unit_fallback(STime, native, Unit)
-           catch
-               error:bad_time_unit -> erlang:error(badarg, [Unit])
-           end
-    end.
+    code_version:update(?MODULE),
+    time_compat:monotonic_time(Unit).
+
+monotonic_time_post_18(Unit) ->
+    erlang:monotonic_time(Unit).
+
+monotonic_time_pre_18(Unit) ->
+    %% Use Erlang system time as monotonic time
+    STime = erlang_system_time_fallback(),
+    convert_time_unit_fallback(STime, native, Unit).
 
 erlang_system_time() ->
-    try
-       erlang:system_time()
-    catch
-       error:undef ->
-           erlang_system_time_fallback()
-    end.
+    code_version:update(?MODULE),
+    time_compat:erlang_system_time().
+
+erlang_system_time_post_18() ->
+       erlang:system_time().
+
+erlang_system_time_pre_18() ->
+    erlang_system_time_fallback().
 
 erlang_system_time(Unit) ->
-    try
-       erlang:system_time(Unit)
-    catch
-       error:badarg ->
-           erlang:error(badarg, [Unit]);
-       error:undef ->
-           STime = erlang_system_time_fallback(),
-           try
-               convert_time_unit_fallback(STime, native, Unit)
-           catch
-               error:bad_time_unit -> erlang:error(badarg, [Unit])
-           end
-    end.
+    code_version:update(?MODULE),
+    time_compat:erlang_system_time(Unit).
+
+erlang_system_time_post_18(Unit) ->
+    erlang:system_time(Unit).
+
+erlang_system_time_pre_18(Unit) ->
+    STime = erlang_system_time_fallback(),
+    convert_time_unit_fallback(STime, native, Unit).
 
 os_system_time() ->
-    try
-       os:system_time()
-    catch
-       error:undef ->
-           os_system_time_fallback()
-    end.
+    code_version:update(?MODULE),
+    time_compat:os_system_time().
+
+os_system_time_post_18() ->
+       os:system_time().
+
+os_system_time_pre_18() ->
+    os_system_time_fallback().
 
 os_system_time(Unit) ->
-    try
-       os:system_time(Unit)
-    catch
-       error:badarg ->
-           erlang:error(badarg, [Unit]);
-       error:undef ->
-           STime = os_system_time_fallback(),
-           try
-               convert_time_unit_fallback(STime, native, Unit)
-           catch
-               error:bad_time_unit -> erlang:error(badarg, [Unit])
-           end
-    end.
+    code_version:update(?MODULE),
+    time_compat:os_system_time(Unit).
+
+os_system_time_post_18(Unit) ->
+    os:system_time(Unit).
+
+os_system_time_pre_18(Unit) ->
+    STime = os_system_time_fallback(),
+    convert_time_unit_fallback(STime, native, Unit).
 
 time_offset() ->
-    try
-       erlang:time_offset()
-    catch
-       error:undef ->
-           %% Erlang system time and Erlang monotonic
-           %% time are always aligned
-           0
-    end.
+    code_version:update(?MODULE),
+    time_compat:time_offset().
+
+time_offset_post_18() ->
+       erlang:time_offset().
+
+time_offset_pre_18() ->
+    %% Erlang system time and Erlang monotonic
+    %% time are always aligned
+    0.
 
 time_offset(Unit) ->
+    code_version:update(?MODULE),
+    time_compat:time_offset(Unit).
+
+time_offset_post_18(Unit) ->
+    erlang:time_offset(Unit).
+
+time_offset_pre_18(Unit) ->
+    _ = integer_time_unit(Unit),
+    %% Erlang system time and Erlang monotonic
+    %% time are always aligned
+    0.
+
+convert_time_unit(Time, FromUnit, ToUnit) ->
+    code_version:update(?MODULE),
+    time_compat:convert_time_unit(Time, FromUnit, ToUnit).
+
+convert_time_unit_post_18(Time, FromUnit, ToUnit) ->
     try
-       erlang:time_offset(Unit)
+        erlang:convert_time_unit(Time, FromUnit, ToUnit)
     catch
-       error:badarg ->
-           erlang:error(badarg, [Unit]);
-       error:undef ->
-           try
-               _ = integer_time_unit(Unit)
-           catch
-               error:bad_time_unit -> erlang:error(badarg, [Unit])
-           end,
-           %% Erlang system time and Erlang monotonic
-           %% time are always aligned
-           0
+        error:Error ->
+           erlang:error(Error, [Time, FromUnit, ToUnit])
     end.
 
-convert_time_unit(Time, FromUnit, ToUnit) ->
+convert_time_unit_pre_18(Time, FromUnit, ToUnit) ->
     try
-       erlang:convert_time_unit(Time, FromUnit, ToUnit)
+        convert_time_unit_fallback(Time, FromUnit, ToUnit)
     catch
-       error:undef ->
-           try
-               convert_time_unit_fallback(Time, FromUnit, ToUnit)
-           catch
                _:_ ->
                    erlang:error(badarg, [Time, FromUnit, ToUnit])
-           end;
-       error:Error ->
-           erlang:error(Error, [Time, FromUnit, ToUnit])
     end.
 
 timestamp() ->
-    try
-       erlang:timestamp()
-    catch
-       error:undef ->
-           erlang:now()
-    end.
+    code_version:update(?MODULE),
+    time_compat:timestamp().
+
+timestamp_post_18() ->
+       erlang:timestamp().
+
+timestamp_pre_18() ->
+    erlang:now().
 
 unique_integer() ->
-    try
-       erlang:unique_integer()
-    catch
-       error:undef ->
-           {MS, S, US} = erlang:now(),
-           (MS*1000000+S)*1000000+US
-    end.
+    code_version:update(?MODULE),
+    time_compat:unique_integer().
+
+unique_integer_post_18() ->
+       erlang:unique_integer().
+
+unique_integer_pre_18() ->
+    {MS, S, US} = erlang:now(),
+    (MS*1000000+S)*1000000+US.
 
 unique_integer(Modifiers) ->
-    try
-       erlang:unique_integer(Modifiers)
-    catch
-       error:badarg ->
-           erlang:error(badarg, [Modifiers]);
-       error:undef ->
-           case is_valid_modifier_list(Modifiers) of
+    code_version:update(?MODULE),
+    time_compat:unique_integer(Modifiers).
+
+unique_integer_post_18(Modifiers) ->
+    erlang:unique_integer(Modifiers).
+
+unique_integer_pre_18(Modifiers) ->
+    case is_valid_modifier_list(Modifiers) of
                true ->
                    %% now() converted to an integer
                    %% fulfill the requirements of
@@ -206,7 +249,6 @@ unique_integer(Modifiers) ->
                    (MS*1000000+S)*1000000+US;
                false ->
                    erlang:error(badarg, [Modifiers])
-           end
     end.
 
 monitor(Type, Item) ->
@@ -277,7 +319,7 @@ integer_time_unit(micro_seconds) -> 1000*1000;
 integer_time_unit(milli_seconds) -> 1000;
 integer_time_unit(seconds) -> 1;
 integer_time_unit(I) when is_integer(I), I > 0 -> I;
-integer_time_unit(BadRes) -> erlang:error(bad_time_unit, [BadRes]).
+integer_time_unit(BadRes) -> erlang:error(badarg, [BadRes]).
 
 erlang_system_time_fallback() ->
     {MS, S, US} = erlang:now(),
diff --git a/rabbitmq-server/deps/rabbit_common/tools/tls-certs/Makefile b/rabbitmq-server/deps/rabbit_common/tools/tls-certs/Makefile
new file mode 100644 (file)
index 0000000..7799587
--- /dev/null
@@ -0,0 +1,67 @@
+ifndef DIR
+$(error DIR must be specified)
+endif
+
+PASSWORD ?= changeme
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+gen_verbose_0 = @echo " GEN   " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+openssl_output_0 = 2>/dev/null
+openssl_output = $(openssl_output_$(V))
+
+.PRECIOUS: %/testca/cacert.pem
+.PHONY: all testca server client clean
+
+all: server client
+       @:
+
+testca: $(DIR)/testca/cacert.pem
+
+server: TARGET = server
+server: $(DIR)/server/cert.pem
+       @:
+
+client: TARGET = client
+client: $(DIR)/client/cert.pem
+       @:
+
+$(DIR)/testca/cacert.pem:
+       $(gen_verbose) mkdir -p $(dir $@)
+       $(verbose) { ( cd $(dir $@) && \
+           mkdir -p certs private && \
+           chmod 700 private && \
+           echo 01 > serial && \
+           :> index.txt && \
+           openssl req -x509 -config $(CURDIR)/openssl.cnf -newkey rsa:2048 -days 365 \
+             -out cacert.pem -outform PEM -subj /CN=MyTestCA/L=$$$$/ -nodes && \
+           openssl x509 -in cacert.pem -out cacert.cer -outform DER ) $(openssl_output) \
+         || (rm -rf $(dir $@) && false); }
+
+$(DIR)/%/cert.pem: $(DIR)/testca/cacert.pem
+       $(gen_verbose) mkdir -p $(DIR)/$(TARGET)
+       $(verbose) { ( cd $(DIR)/$(TARGET) && \
+           openssl genrsa -out key.pem 2048 &&\
+           openssl req -new -key key.pem -out req.pem -outform PEM\
+               -subj /CN=$$(hostname)/O=$(TARGET)/L=$$$$/ -nodes &&\
+           cd ../testca && \
+           openssl ca -config $(CURDIR)/openssl.cnf -in ../$(TARGET)/req.pem -out \
+             ../$(TARGET)/cert.pem -notext -batch -extensions \
+             $(TARGET)_ca_extensions && \
+           cd ../$(TARGET) && \
+           openssl pkcs12 -export -out keycert.p12 -in cert.pem -inkey key.pem \
+           -passout pass:$(PASSWORD) ) $(openssl_output) || (rm -rf $(DIR)/$(TARGET) && false); }
+
+clean:
+       rm -rf $(DIR)/testca
+       rm -rf $(DIR)/server
+       rm -rf $(DIR)/client
diff --git a/rabbitmq-server/deps/rabbit_common/tools/tls-certs/openssl.cnf b/rabbitmq-server/deps/rabbit_common/tools/tls-certs/openssl.cnf
new file mode 100644 (file)
index 0000000..93ffb2f
--- /dev/null
@@ -0,0 +1,54 @@
+[ ca ]
+default_ca = testca
+
+[ testca ]
+dir = .
+certificate = $dir/cacert.pem
+database = $dir/index.txt
+new_certs_dir = $dir/certs
+private_key = $dir/private/cakey.pem
+serial = $dir/serial
+
+default_crl_days = 7
+default_days = 365
+default_md = sha1
+
+policy = testca_policy
+x509_extensions = certificate_extensions
+
+[ testca_policy ]
+commonName = supplied
+stateOrProvinceName = optional
+countryName = optional
+emailAddress = optional
+organizationName = optional
+organizationalUnitName = optional
+domainComponent = optional
+
+[ certificate_extensions ]
+basicConstraints = CA:false
+
+[ req ]
+default_bits = 2048
+default_keyfile = ./private/cakey.pem
+default_md = sha1
+prompt = yes
+distinguished_name = root_ca_distinguished_name
+x509_extensions = root_ca_extensions
+
+[ root_ca_distinguished_name ]
+commonName = hostname
+
+[ root_ca_extensions ]
+basicConstraints = CA:true
+keyUsage = keyCertSign, cRLSign
+
+[ client_ca_extensions ]
+basicConstraints = CA:false
+keyUsage = digitalSignature
+extendedKeyUsage = 1.3.6.1.5.5.7.3.2
+
+[ server_ca_extensions ]
+basicConstraints = CA:false
+keyUsage = keyEncipherment
+extendedKeyUsage = 1.3.6.1.5.5.7.3.1
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_amqp1_0/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index e9e612b0fb71a3d5c3a6868d427754a2224a36e0..d67b4045717a42453aa10b2e42932a1c5e5b2a77 100644 (file)
@@ -1,8 +1,7 @@
 PROJECT = rabbitmq_amqp1_0
 
 DEPS = amqp_client
-
-TEST_DEPS = rabbit rabbitmq_java_client
+TEST_DEPS += rabbit
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -19,6 +18,10 @@ ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
 ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
+
+# FIXME: Remove rabbitmq_test as TEST_DEPS from here for now.
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
+
 include erlang.mk
 
 # --------------------------------------------------------------------
@@ -47,15 +50,12 @@ clean:: clean-extra-sources
 clean-extra-sources:
        $(gen_verbose) rm -f $(EXTRA_SOURCES)
 
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
-
-WITH_BROKER_TEST_SCRIPTS := $(CURDIR)/test/swiftmq/run-tests.sh
-
-STANDALONE_TEST_COMMANDS := eunit:test(rabbit_amqp1_0_test,[verbose])
+distclean:: distclean-dotnet-tests distclean-java-tests
 
-distclean:: distclean-swiftmq
+distclean-dotnet-tests:
+       $(gen_verbose) cd test/system_SUITE_data/dotnet-tests && \
+               rm -rf bin obj && \
+               rm -f project.lock.json TestResult.xml
 
-distclean-swiftmq:
-       $(gen_verbose) $(MAKE) -C test/swiftmq clean
+distclean-java-tests:
+       $(gen_verbose) cd test/system_SUITE_data/java-tests && mvn clean
index 9f0c0c38494c4beabf27ccddfa996d51d66a91d8..f7ca7bebb76849368b9a6bf56f1bdea9e847604d 100644 (file)
@@ -16,7 +16,7 @@
 
 ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
 
-ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+ERLANG_MK_VERSION = 2.0.0-pre.2-132-g62d576b
 
 # Core configuration.
 
@@ -24,6 +24,7 @@ PROJECT ?= $(notdir $(CURDIR))
 PROJECT := $(strip $(PROJECT))
 
 PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
 
 # Verbosity.
 
@@ -84,7 +85,7 @@ all:: deps app rel
 rel::
        $(verbose) :
 
-check:: clean app tests
+check:: tests
 
 clean:: clean-crashdump
 
@@ -283,7 +284,7 @@ pkg_apns_description = Apple Push Notification Server for Erlang
 pkg_apns_homepage = http://inaka.github.com/apns4erl
 pkg_apns_fetch = git
 pkg_apns_repo = https://github.com/inaka/apns4erl
-pkg_apns_commit = 1.0.4
+pkg_apns_commit = master
 
 PACKAGES += azdht
 pkg_azdht_name = azdht
@@ -387,7 +388,7 @@ pkg_bitcask_description = because you need another a key/value storage engine
 pkg_bitcask_homepage = https://github.com/basho/bitcask
 pkg_bitcask_fetch = git
 pkg_bitcask_repo = https://github.com/basho/bitcask
-pkg_bitcask_commit = master
+pkg_bitcask_commit = develop
 
 PACKAGES += bitstore
 pkg_bitstore_name = bitstore
@@ -421,6 +422,14 @@ pkg_boss_db_fetch = git
 pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
 pkg_boss_db_commit = master
 
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
 PACKAGES += bson
 pkg_bson_name = bson
 pkg_bson_description = BSON documents in Erlang, see bsonspec.org
@@ -451,7 +460,7 @@ pkg_cake_description = Really simple terminal colorization
 pkg_cake_homepage = https://github.com/darach/cake-erl
 pkg_cake_fetch = git
 pkg_cake_repo = https://github.com/darach/cake-erl
-pkg_cake_commit = v0.1.2
+pkg_cake_commit = master
 
 PACKAGES += carotene
 pkg_carotene_name = carotene
@@ -787,7 +796,7 @@ pkg_cowboy_description = Small, fast and modular HTTP server.
 pkg_cowboy_homepage = http://ninenines.eu
 pkg_cowboy_fetch = git
 pkg_cowboy_repo = https://github.com/ninenines/cowboy
-pkg_cowboy_commit = 1.0.1
+pkg_cowboy_commit = 1.0.4
 
 PACKAGES += cowdb
 pkg_cowdb_name = cowdb
@@ -803,7 +812,7 @@ pkg_cowlib_description = Support library for manipulating Web protocols.
 pkg_cowlib_homepage = http://ninenines.eu
 pkg_cowlib_fetch = git
 pkg_cowlib_repo = https://github.com/ninenines/cowlib
-pkg_cowlib_commit = 1.0.1
+pkg_cowlib_commit = 1.0.2
 
 PACKAGES += cpg
 pkg_cpg_name = cpg
@@ -885,14 +894,6 @@ pkg_dh_date_fetch = git
 pkg_dh_date_repo = https://github.com/daleharvey/dh_date
 pkg_dh_date_commit = master
 
-PACKAGES += dhtcrawler
-pkg_dhtcrawler_name = dhtcrawler
-pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
-pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_fetch = git
-pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_commit = master
-
 PACKAGES += dirbusterl
 pkg_dirbusterl_name = dirbusterl
 pkg_dirbusterl_description = DirBuster successor in Erlang
@@ -1053,14 +1054,6 @@ pkg_efene_fetch = git
 pkg_efene_repo = https://github.com/efene/efene
 pkg_efene_commit = master
 
-PACKAGES += eganglia
-pkg_eganglia_name = eganglia
-pkg_eganglia_description = Erlang library to interact with Ganglia
-pkg_eganglia_homepage = https://github.com/inaka/eganglia
-pkg_eganglia_fetch = git
-pkg_eganglia_repo = https://github.com/inaka/eganglia
-pkg_eganglia_commit = v0.9.1
-
 PACKAGES += egeoip
 pkg_egeoip_name = egeoip
 pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
@@ -1075,7 +1068,7 @@ pkg_ehsa_description = Erlang HTTP server basic and digest authentication module
 pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
 pkg_ehsa_fetch = hg
 pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
-pkg_ehsa_commit = 2.0.4
+pkg_ehsa_commit = default
 
 PACKAGES += ej
 pkg_ej_name = ej
@@ -1139,7 +1132,7 @@ pkg_elvis_description = Erlang Style Reviewer
 pkg_elvis_homepage = https://github.com/inaka/elvis
 pkg_elvis_fetch = git
 pkg_elvis_repo = https://github.com/inaka/elvis
-pkg_elvis_commit = 0.2.4
+pkg_elvis_commit = master
 
 PACKAGES += emagick
 pkg_emagick_name = emagick
@@ -1515,7 +1508,7 @@ pkg_erwa_description = A WAMP router and client written in Erlang.
 pkg_erwa_homepage = https://github.com/bwegh/erwa
 pkg_erwa_fetch = git
 pkg_erwa_repo = https://github.com/bwegh/erwa
-pkg_erwa_commit = 0.1.1
+pkg_erwa_commit = master
 
 PACKAGES += espec
 pkg_espec_name = espec
@@ -1619,7 +1612,7 @@ pkg_exometer_description = Basic measurement objects and probe behavior
 pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
 pkg_exometer_fetch = git
 pkg_exometer_repo = https://github.com/Feuerlabs/exometer
-pkg_exometer_commit = 1.2
+pkg_exometer_commit = master
 
 PACKAGES += exs1024
 pkg_exs1024_name = exs1024
@@ -1683,7 +1676,15 @@ pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
 pkg_feeder_homepage = https://github.com/michaelnisi/feeder
 pkg_feeder_fetch = git
 pkg_feeder_repo = https://github.com/michaelnisi/feeder
-pkg_feeder_commit = v1.4.6
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
 
 PACKAGES += fix
 pkg_fix_name = fix
@@ -1781,6 +1782,14 @@ pkg_geef_fetch = git
 pkg_geef_repo = https://github.com/carlosmn/geef
 pkg_geef_commit = master
 
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
 PACKAGES += gen_cycle
 pkg_gen_cycle_name = gen_cycle
 pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
@@ -1837,6 +1846,14 @@ pkg_gen_unix_fetch = git
 pkg_gen_unix_repo = https://github.com/msantos/gen_unix
 pkg_gen_unix_commit = master
 
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
 PACKAGES += getopt
 pkg_getopt_name = getopt
 pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
@@ -1981,13 +1998,21 @@ pkg_hyper_fetch = git
 pkg_hyper_repo = https://github.com/GameAnalytics/hyper
 pkg_hyper_commit = master
 
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
 PACKAGES += ibrowse
 pkg_ibrowse_name = ibrowse
 pkg_ibrowse_description = Erlang HTTP client
 pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
 pkg_ibrowse_fetch = git
 pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
-pkg_ibrowse_commit = v4.1.1
+pkg_ibrowse_commit = master
 
 PACKAGES += ierlang
 pkg_ierlang_name = ierlang
@@ -2043,7 +2068,7 @@ pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
 pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
 pkg_jamdb_sybase_fetch = git
 pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
-pkg_jamdb_sybase_commit = 0.6.0
+pkg_jamdb_sybase_commit = master
 
 PACKAGES += jerg
 pkg_jerg_name = jerg
@@ -2056,9 +2081,9 @@ pkg_jerg_commit = master
 PACKAGES += jesse
 pkg_jesse_name = jesse
 pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
-pkg_jesse_homepage = https://github.com/klarna/jesse
+pkg_jesse_homepage = https://github.com/for-GET/jesse
 pkg_jesse_fetch = git
-pkg_jesse_repo = https://github.com/klarna/jesse
+pkg_jesse_repo = https://github.com/for-GET/jesse
 pkg_jesse_commit = master
 
 PACKAGES += jiffy
@@ -2075,7 +2100,7 @@ pkg_jiffy_v_description = JSON validation utility
 pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
 pkg_jiffy_v_fetch = git
 pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
-pkg_jiffy_v_commit = 0.3.3
+pkg_jiffy_v_commit = master
 
 PACKAGES += jobs
 pkg_jobs_name = jobs
@@ -2083,7 +2108,7 @@ pkg_jobs_description = a Job scheduler for load regulation
 pkg_jobs_homepage = https://github.com/esl/jobs
 pkg_jobs_fetch = git
 pkg_jobs_repo = https://github.com/esl/jobs
-pkg_jobs_commit = 0.3
+pkg_jobs_commit = master
 
 PACKAGES += joxa
 pkg_joxa_name = joxa
@@ -2109,6 +2134,14 @@ pkg_json_rec_fetch = git
 pkg_json_rec_repo = https://github.com/justinkirby/json_rec
 pkg_json_rec_commit = master
 
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
 PACKAGES += jsonerl
 pkg_jsonerl_name = jsonerl
 pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
@@ -2149,6 +2182,14 @@ pkg_kafka_fetch = git
 pkg_kafka_repo = https://github.com/wooga/kafka-erlang
 pkg_kafka_commit = master
 
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
 PACKAGES += kai
 pkg_kai_name = kai
 pkg_kai_description = DHT storage by Takeshi Inoue
@@ -2291,7 +2332,7 @@ pkg_lasse_description = SSE handler for Cowboy
 pkg_lasse_homepage = https://github.com/inaka/lasse
 pkg_lasse_fetch = git
 pkg_lasse_repo = https://github.com/inaka/lasse
-pkg_lasse_commit = 0.1.0
+pkg_lasse_commit = master
 
 PACKAGES += ldap
 pkg_ldap_name = ldap
@@ -2501,6 +2542,14 @@ pkg_merl_fetch = git
 pkg_merl_repo = https://github.com/richcarl/merl
 pkg_merl_commit = master
 
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
 PACKAGES += mimetypes
 pkg_mimetypes_name = mimetypes
 pkg_mimetypes_description = Erlang MIME types library
@@ -2733,21 +2782,13 @@ pkg_oauth2_fetch = git
 pkg_oauth2_repo = https://github.com/kivra/oauth2
 pkg_oauth2_commit = master
 
-PACKAGES += oauth2c
-pkg_oauth2c_name = oauth2c
-pkg_oauth2c_description = Erlang OAuth2 Client
-pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
-pkg_oauth2c_fetch = git
-pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
-pkg_oauth2c_commit = master
-
 PACKAGES += octopus
 pkg_octopus_name = octopus
 pkg_octopus_description = Small and flexible pool manager written in Erlang
 pkg_octopus_homepage = https://github.com/erlangbureau/octopus
 pkg_octopus_fetch = git
 pkg_octopus_repo = https://github.com/erlangbureau/octopus
-pkg_octopus_commit = 1.0.0
+pkg_octopus_commit = master
 
 PACKAGES += of_protocol
 pkg_of_protocol_name = of_protocol
@@ -2819,7 +2860,7 @@ pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
 pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
 pkg_pegjs_fetch = git
 pkg_pegjs_repo = https://github.com/dmitriid/pegjs
-pkg_pegjs_commit = 0.3
+pkg_pegjs_commit = master
 
 PACKAGES += percept2
 pkg_percept2_name = percept2
@@ -2987,7 +3028,7 @@ pkg_qdate_description = Date, time, and timezone parsing, formatting, and conver
 pkg_qdate_homepage = https://github.com/choptastic/qdate
 pkg_qdate_fetch = git
 pkg_qdate_repo = https://github.com/choptastic/qdate
-pkg_qdate_commit = 0.4.0
+pkg_qdate_commit = master
 
 PACKAGES += qrcode
 pkg_qrcode_name = qrcode
@@ -3059,7 +3100,7 @@ pkg_ranch_description = Socket acceptor pool for TCP protocols.
 pkg_ranch_homepage = http://ninenines.eu
 pkg_ranch_fetch = git
 pkg_ranch_repo = https://github.com/ninenines/ranch
-pkg_ranch_commit = 1.1.0
+pkg_ranch_commit = 1.2.1
 
 PACKAGES += rbeacon
 pkg_rbeacon_name = rbeacon
@@ -3099,7 +3140,7 @@ pkg_recon_description = Collection of functions and scripts to debug Erlang in p
 pkg_recon_homepage = https://github.com/ferd/recon
 pkg_recon_fetch = git
 pkg_recon_repo = https://github.com/ferd/recon
-pkg_recon_commit = 2.2.1
+pkg_recon_commit = master
 
 PACKAGES += record_info
 pkg_record_info_name = record_info
@@ -3293,6 +3334,14 @@ pkg_rlimit_fetch = git
 pkg_rlimit_repo = https://github.com/jlouis/rlimit
 pkg_rlimit_commit = master
 
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
 PACKAGES += safetyvalve
 pkg_safetyvalve_name = safetyvalve
 pkg_safetyvalve_description = A safety valve for your erlang node
@@ -3363,7 +3412,7 @@ pkg_shotgun_description = better than just a gun
 pkg_shotgun_homepage = https://github.com/inaka/shotgun
 pkg_shotgun_fetch = git
 pkg_shotgun_repo = https://github.com/inaka/shotgun
-pkg_shotgun_commit = 0.1.0
+pkg_shotgun_commit = master
 
 PACKAGES += sidejob
 pkg_sidejob_name = sidejob
@@ -3421,6 +3470,14 @@ pkg_skel_fetch = git
 pkg_skel_repo = https://github.com/ParaPhrase/skel
 pkg_skel_commit = master
 
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
 PACKAGES += smother
 pkg_smother_name = smother
 pkg_smother_description = Extended code coverage metrics for Erlang.
@@ -3533,6 +3590,14 @@ pkg_stripe_fetch = git
 pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
 pkg_stripe_commit = v1
 
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
 PACKAGES += surrogate
 pkg_surrogate_name = surrogate
 pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
@@ -3567,7 +3632,7 @@ pkg_switchboard_commit = master
 
 PACKAGES += syn
 pkg_syn_name = syn
-pkg_syn_description = A global process registry for Erlang.
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
 pkg_syn_homepage = https://github.com/ostinelli/syn
 pkg_syn_fetch = git
 pkg_syn_repo = https://github.com/ostinelli/syn
@@ -3739,7 +3804,7 @@ pkg_unicorn_description = Generic configuration server
 pkg_unicorn_homepage = https://github.com/shizzard/unicorn
 pkg_unicorn_fetch = git
 pkg_unicorn_repo = https://github.com/shizzard/unicorn
-pkg_unicorn_commit = 0.3.0
+pkg_unicorn_commit = master
 
 PACKAGES += unsplit
 pkg_unsplit_name = unsplit
@@ -3755,7 +3820,7 @@ pkg_uuid_description = Erlang UUID Implementation
 pkg_uuid_homepage = https://github.com/okeuday/uuid
 pkg_uuid_fetch = git
 pkg_uuid_repo = https://github.com/okeuday/uuid
-pkg_uuid_commit = v1.4.0
+pkg_uuid_commit = master
 
 PACKAGES += ux
 pkg_ux_name = ux
@@ -3875,7 +3940,7 @@ pkg_worker_pool_description = a simple erlang worker pool
 pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
 pkg_worker_pool_fetch = git
 pkg_worker_pool_repo = https://github.com/inaka/worker_pool
-pkg_worker_pool_commit = 1.0.3
+pkg_worker_pool_commit = master
 
 PACKAGES += wrangler
 pkg_wrangler_name = wrangler
@@ -3907,7 +3972,7 @@ pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
 pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
 pkg_xref_runner_fetch = git
 pkg_xref_runner_repo = https://github.com/inaka/xref_runner
-pkg_xref_runner_commit = 0.2.0
+pkg_xref_runner_commit = master
 
 PACKAGES += yamerl
 pkg_yamerl_name = yamerl
@@ -3941,13 +4006,21 @@ pkg_zab_engine_fetch = git
 pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
 pkg_zab_engine_commit = master
 
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
 PACKAGES += zeta
 pkg_zeta_name = zeta
 pkg_zeta_description = HTTP access log parser in Erlang
 pkg_zeta_homepage = https://github.com/s1n4/zeta
 pkg_zeta_fetch = git
 pkg_zeta_repo = https://github.com/s1n4/zeta
-pkg_zeta_commit =  
+pkg_zeta_commit = master
 
 PACKAGES += zippers
 pkg_zippers_name = zippers
@@ -4063,6 +4136,9 @@ deps::
 else
 deps:: $(ALL_DEPS_DIRS)
 ifndef IS_APP
+       $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+               mkdir -p $$dep/ebin; \
+       done
        $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
                $(MAKE) -C $$dep IS_APP=1 || exit $$?; \
        done
@@ -4092,7 +4168,10 @@ endif
 # While Makefile file could be GNUmakefile or makefile,
 # in practice only Makefile is needed so far.
 define dep_autopatch
-       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+               $(call dep_autopatch_erlang_mk,$(1)); \
+       elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
                if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
                        $(call dep_autopatch2,$(1)); \
                elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
@@ -4100,12 +4179,7 @@ define dep_autopatch
                elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
                        $(call dep_autopatch2,$(1)); \
                else \
-                       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
-                               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-                               $(call dep_autopatch_erlang_mk,$(1)); \
-                       else \
-                               $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
-                       fi \
+                       $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
                fi \
        else \
                if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
@@ -4117,8 +4191,11 @@ define dep_autopatch
 endef
 
 define dep_autopatch2
+       if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+       fi; \
        $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-       if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
                $(call dep_autopatch_fetch_rebar); \
                $(call dep_autopatch_rebar,$(1)); \
        else \
@@ -4256,57 +4333,6 @@ define dep_autopatch_rebar.erl
                                Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
                end
        end(),
-       FindFirst = fun(F, Fd) ->
-               case io:parse_erl_form(Fd, undefined) of
-                       {ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
-                               [PT, F(F, Fd)];
-                       {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
-                               case proplists:get_value(parse_transform, CompileOpts) of
-                                       undefined -> [F(F, Fd)];
-                                       PT -> [PT, F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, include, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ ->
-                                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
-                                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                                       _ -> [F(F, Fd)]
-                                               end
-                               end;
-                       {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
-                               {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
-                               [F(F, HrlFd), F(F, Fd)];
-                       {ok, {attribute, _, include_lib, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, import, {Imp, _}}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
-                                       {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {eof, _} ->
-                               file:close(Fd),
-                               [];
-                       _ ->
-                               F(F, Fd)
-               end
-       end,
-       fun() ->
-               ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
-               First0 = lists:usort(lists:flatten([begin
-                       {ok, Fd} = file:open(F, [read]),
-                       FindFirst(FindFirst, Fd)
-               end || F <- ErlFiles])),
-               First = lists:flatten([begin
-                       {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
-                       FindFirst(FindFirst, Fd)
-               end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
-               Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
-                       lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
-       end(),
        Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
        Write("\npreprocess::\n"),
        Write("\npre-deps::\n"),
@@ -4374,9 +4400,9 @@ define dep_autopatch_rebar.erl
                [] -> ok;
                _ ->
                        Write("\npre-app::\n\t$$\(MAKE) -f c_src/Makefile.erlang.mk\n"),
-                       PortSpecWrite(io_lib:format("ERL_CFLAGS = -finline-functions -Wall -fPIC -I ~s/erts-~s/include -I ~s\n",
+                       PortSpecWrite(io_lib:format("ERL_CFLAGS = -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
                                [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
-                       PortSpecWrite(io_lib:format("ERL_LDFLAGS = -L ~s -lerl_interface -lei\n",
+                       PortSpecWrite(io_lib:format("ERL_LDFLAGS = -L \\"~s\\" -lerl_interface -lei\n",
                                [code:lib_dir(erl_interface, lib)])),
                        [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
                        FilterEnv = fun(Env) ->
@@ -4419,9 +4445,10 @@ define dep_autopatch_rebar.erl
                                        Output, ": $$\(foreach ext,.c .C .cc .cpp,",
                                                "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
                                        "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
-                                       case filename:extension(Output) of
-                                               [] -> "\n";
-                                               _ -> " -shared\n"
+                                       case {filename:extension(Output), $(PLATFORM)} of
+                                           {[], _} -> "\n";
+                                           {_, darwin} -> "\n";
+                                           _ -> " -shared\n"
                                        end])
                        end,
                        [PortSpec(S) || S <- PortSpecs]
@@ -4490,6 +4517,15 @@ define dep_autopatch_app.erl
        halt()
 endef
 
+define dep_autopatch_appsrc_script.erl
+       AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+       AppSrcScript = AppSrc ++ ".script",
+       Bindings = erl_eval:new_bindings(),
+       {ok, Conf} = file:script(AppSrcScript, Bindings),
+       ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+       halt()
+endef
+
 define dep_autopatch_appsrc.erl
        AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
        AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
@@ -4576,10 +4612,11 @@ $(DEPS_DIR)/$(call dep_name,$1):
                exit 17; \
        fi
        $(verbose) mkdir -p $(DEPS_DIR)
-       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
-       $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
-               echo " AUTO  " $(DEP_STR); \
-               cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+       $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+                       && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+               echo " AUTO  " $(1); \
+               cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
        fi
        - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
                echo " CONF  " $(DEP_STR); \
@@ -4664,6 +4701,7 @@ $(foreach p,$(DEP_PLUGINS),\
 DTL_FULL_PATH ?=
 DTL_PATH ?= templates/
 DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
 
 # Verbosity.
 
@@ -4672,28 +4710,10 @@ dtl_verbose = $(dtl_verbose_$(V))
 
 # Core targets.
 
-define erlydtl_compile.erl
-       [begin
-               Module0 = case "$(strip $(DTL_FULL_PATH))" of
-                       "" ->
-                               filename:basename(F, ".dtl");
-                       _ ->
-                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
-                               re:replace(F2, "/",  "_",  [{return, list}, global])
-               end,
-               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
-               case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
-                       ok -> ok;
-                       {ok, _} -> ok
-               end
-       end || F <- string:tokens("$(1)", " ")],
-       halt().
-endef
-
-ifneq ($(wildcard src/),)
-
 DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
 
+ifneq ($(DTL_FILES),)
+
 ifdef DTL_FULL_PATH
 BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
 else
@@ -4701,7 +4721,7 @@ BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES
 endif
 
 ifneq ($(words $(DTL_FILES)),0)
-# Rebuild everything when the Makefile changes.
+# Rebuild templates when the Makefile changes.
 $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
        @mkdir -p $(ERLANG_MK_TMP)
        @if test -f $@; then \
@@ -4712,9 +4732,28 @@ $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
 ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
 endif
 
-ebin/$(PROJECT).app:: $(DTL_FILES)
+define erlydtl_compile.erl
+       [begin
+               Module0 = case "$(strip $(DTL_FULL_PATH))" of
+                       "" ->
+                               filename:basename(F, ".dtl");
+                       _ ->
+                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+                               re:replace(F2, "/",  "_",  [{return, list}, global])
+               end,
+               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+               case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+                       ok -> ok;
+                       {ok, _} -> ok
+               end
+       end || F <- string:tokens("$(1)", " ")],
+       halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
        $(if $(strip $?),\
-               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?),-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/))
+
 endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
@@ -4810,7 +4849,7 @@ app:: clean deps $(PROJECT).d
        $(verbose) $(MAKE) --no-print-directory app-build
 endif
 
-ifeq ($(wildcard src/$(PROJECT)_app.erl),)
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
 define app_file
 {application, $(PROJECT), [
        {description, "$(PROJECT_DESCRIPTION)"},
@@ -4830,7 +4869,7 @@ define app_file
        {modules, [$(call comma_list,$(2))]},
        {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
        {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]},
-       {mod, {$(PROJECT)_app, []}}
+       {mod, {$(PROJECT_MOD), []}}
 ]}.
 endef
 endif
@@ -4888,51 +4927,79 @@ $(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
 # Erlang and Core Erlang files.
 
 define makedep.erl
+       E = ets:new(makedep, [bag]),
+       G = digraph:new([acyclic]),
        ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
-       Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
-       Add = fun (Dep, Acc) ->
-               case lists:keyfind(atom_to_list(Dep), 1, Modules) of
-                       {_, DepFile} -> [DepFile|Acc];
-                       false -> Acc
+       Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+       Add = fun (Mod, Dep) ->
+               case lists:keyfind(Dep, 1, Modules) of
+                       false -> ok;
+                       {_, DepFile} ->
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile}),
+                               digraph:add_vertex(G, Mod),
+                               digraph:add_vertex(G, Dep),
+                               digraph:add_edge(G, Mod, Dep)
                end
        end,
-       AddHd = fun (Dep, Acc) ->
-               case {Dep, lists:keymember(Dep, 2, Modules)} of
-                       {"src/" ++ _, false} -> [Dep|Acc];
-                       {"include/" ++ _, false} -> [Dep|Acc];
-                       _ -> Acc
+       AddHd = fun (F, Mod, DepFile) ->
+               case file:open(DepFile, [read]) of
+                       {error, enoent} -> ok;
+                       {ok, Fd} ->
+                               F(F, Fd, Mod),
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile})
                end
        end,
-       CompileFirst = fun (Deps) ->
-               First0 = [case filename:extension(D) of
-                       ".erl" -> filename:basename(D, ".erl");
-                       _ -> []
-               end || D <- Deps],
-               case lists:usort(First0) of
-                       [] -> [];
-                       [[]] -> [];
-                       First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
-               end
+       Attr = fun
+               (F, Mod, behavior, Dep) -> Add(Mod, Dep);
+               (F, Mod, behaviour, Dep) -> Add(Mod, Dep);
+               (F, Mod, compile, {parse_transform, Dep}) -> Add(Mod, Dep);
+               (F, Mod, compile, Opts) when is_list(Opts) ->
+                       case proplists:get_value(parse_transform, Opts) of
+                               undefined -> ok;
+                               Dep -> Add(Mod, Dep)
+                       end;
+               (F, Mod, include, Hrl) ->
+                       case filelib:is_file("include/" ++ Hrl) of
+                               true -> AddHd(F, Mod, "include/" ++ Hrl);
+                               false ->
+                                       case filelib:is_file("src/" ++ Hrl) of
+                                               true -> AddHd(F, Mod, "src/" ++ Hrl);
+                                               false -> false
+                                       end
+                       end;
+               (F, Mod, include_lib, "$1/include/" ++ Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, include_lib, Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, import, {Imp, _}) ->
+                       case filelib:is_file("src/" ++ atom_to_list(Imp) ++ ".erl") of
+                               false -> ok;
+                               true -> Add(Mod, Imp)
+                       end;
+               (_, _, _, _) -> ok
        end,
-       Depend = [begin
-               case epp:parse_file(F, ["include/"], []) of
-                       {ok, Forms} ->
-                               Deps = lists:usort(lists:foldl(fun
-                                       ({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
-                                       (_, Acc) -> Acc
-                               end, [], Forms)),
-                               case Deps of
-                                       [] -> "";
-                                       _ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
-                               end;
-                       {error, enoent} ->
-                               []
+       MakeDepend = fun(F, Fd, Mod) ->
+               case io:parse_erl_form(Fd, undefined) of
+                       {ok, {attribute, _, Key, Value}, _} ->
+                               Attr(F, Mod, Key, Value),
+                               F(F, Fd, Mod);
+                       {eof, _} ->
+                               file:close(Fd);
+                       _ ->
+                               F(F, Fd, Mod)
                end
+       end,
+       [begin
+               Mod = list_to_atom(filename:basename(F, ".erl")),
+               {ok, Fd} = file:open(F, [read]),
+               MakeDepend(MakeDepend, Fd, Mod)
        end || F <- ErlFiles],
-       ok = file:write_file("$(1)", Depend),
+       Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+       CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+       ok = file:write_file("$(1)", [
+               [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+               "\nCOMPILE_FIRST +=", [[" ", atom_to_list(CF)] || CF <- CompileFirst], "\n"
+       ]),
        halt()
 endef
 
@@ -4977,13 +5044,13 @@ ifeq ($(wildcard src/$(PROJECT).app.src),)
        $(app_verbose) printf "$(subst $(newline),\n,$(subst ",\",$(call app_file,$(GITDESCRIBE),$(MODULES))))" \
                > ebin/$(PROJECT).app
 else
-       $(verbose) if [ -z "$$(grep -E '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+       $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
                echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk README for instructions." >&2; \
                exit 1; \
        fi
        $(appsrc_verbose) cat src/$(PROJECT).app.src \
                | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
-               | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(GITDESCRIBE)\"}/" \
+               | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
                > ebin/$(PROJECT).app
 endif
 
@@ -5069,6 +5136,11 @@ test-dir:
                $(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
 endif
 
+ifeq ($(wildcard src),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps
+       $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+else
 ifeq ($(wildcard ebin/test),)
 test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
 test-build:: clean deps test-deps $(PROJECT).d
@@ -5086,6 +5158,7 @@ clean-test-dir:
 ifneq ($(wildcard $(TEST_DIR)/*.beam),)
        $(gen_verbose) rm -f $(TEST_DIR)/*.beam
 endif
+endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5095,7 +5168,7 @@ endif
 # We strip out -Werror because we don't want to fail due to
 # warnings when used as a dependency.
 
-compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/')
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
 
 define compat_convert_erlc_opts
 $(if $(filter-out -Werror,$1),\
@@ -5103,11 +5176,18 @@ $(if $(filter-out -Werror,$1),\
                $(shell echo $1 | cut -b 2-)))
 endef
 
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
 define compat_rebar_config
-{deps, [$(call comma_list,$(foreach d,$(DEPS),\
-       {$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
-{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
-       $(call compat_convert_erlc_opts,$o)))]}.
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+       $(if $(filter hex,$(call dep_fetch,$d)),\
+               {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+               {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
 endef
 
 $(eval _compat_rebar_config = $$(compat_rebar_config))
@@ -5126,12 +5206,12 @@ MAN_SECTIONS ?= 3 7
 
 docs:: asciidoc
 
-asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+asciidoc: asciidoc-guide asciidoc-manual
 
 ifeq ($(wildcard doc/src/guide/book.asciidoc),)
 asciidoc-guide:
 else
-asciidoc-guide:
+asciidoc-guide: distclean-asciidoc doc-deps
        a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
        a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
 endif
@@ -5139,7 +5219,7 @@ endif
 ifeq ($(wildcard doc/src/manual/*.asciidoc),)
 asciidoc-manual:
 else
-asciidoc-manual:
+asciidoc-manual: distclean-asciidoc doc-deps
        for f in doc/src/manual/*.asciidoc ; do \
                a2x -v -f manpage $$f ; \
        done
@@ -5154,7 +5234,7 @@ install-docs:: install-asciidoc
 install-asciidoc: asciidoc-manual
        for s in $(MAN_SECTIONS); do \
                mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
-               install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+               install -g `id -u` -o `id -g` -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
        done
 endif
 
@@ -5176,8 +5256,8 @@ help::
                "  bootstrap          Generate a skeleton of an OTP application" \
                "  bootstrap-lib      Generate a skeleton of an OTP library" \
                "  bootstrap-rel      Generate the files needed to build a release" \
-               "  new-app n=NAME     Create a new local OTP application NAME" \
-               "  new-lib n=NAME     Create a new local OTP library NAME" \
+               "  new-app in=NAME    Create a new local OTP application NAME" \
+               "  new-lib in=NAME    Create a new local OTP library NAME" \
                "  new t=TPL n=NAME   Generate a module NAME based on the template TPL" \
                "  new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
                "  list-templates     List available templates"
@@ -5214,6 +5294,8 @@ define bs_appsrc_lib
 ]}.
 endef
 
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
 ifdef SP
 define bs_Makefile
 PROJECT = $p
@@ -5223,17 +5305,21 @@ PROJECT_VERSION = 0.0.1
 # Whitespace to be used when creating files from templates.
 SP = $(SP)
 
-include erlang.mk
 endef
 else
 define bs_Makefile
 PROJECT = $p
-include erlang.mk
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 endef
 endif
 
 define bs_apps_Makefile
 PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
 endef
 
@@ -5331,6 +5417,11 @@ code_change(_OldVsn, State, _Extra) ->
        {ok, State}.
 endef
 
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
 define tpl_cowboy_http
 -module($(n)).
 -behaviour(cowboy_http_handler).
@@ -5527,6 +5618,7 @@ endif
        $(eval p := $(PROJECT))
        $(eval n := $(PROJECT)_sup)
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc,src/$(PROJECT).app.src)
@@ -5540,6 +5632,7 @@ ifneq ($(wildcard src/),)
 endif
        $(eval p := $(PROJECT))
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
@@ -5620,12 +5713,33 @@ list-templates:
 
 C_SRC_DIR ?= $(CURDIR)/c_src
 C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
-C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
 C_SRC_TYPE ?= shared
 
 # System type and C compiler/flags.
 
-ifeq ($(PLATFORM),darwin)
+ifeq ($(PLATFORM),msys2)
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+       CC = /mingw64/bin/gcc
+       export CC
+       CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+       CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
        CC ?= cc
        CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
        CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
@@ -5640,10 +5754,15 @@ else ifeq ($(PLATFORM),linux)
        CXXFLAGS ?= -O3 -finline-functions -Wall
 endif
 
-CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
-CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+ifneq ($(PLATFORM),msys2)
+       CFLAGS += -fPIC
+       CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
 
-LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lerl_interface -lei
 
 # Verbosity.
 
@@ -5680,15 +5799,15 @@ OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
 COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
 COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
 
-app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-$(C_SRC_OUTPUT): $(OBJECTS)
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
        $(verbose) mkdir -p priv/
        $(link_verbose) $(CC) $(OBJECTS) \
                $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
-               -o $(C_SRC_OUTPUT)
+               -o $(C_SRC_OUTPUT_FILE)
 
 %.o: %.c
        $(COMPILE_C) $(OUTPUT_OPTION) $<
@@ -5705,13 +5824,13 @@ $(C_SRC_OUTPUT): $(OBJECTS)
 clean:: clean-c_src
 
 clean-c_src:
-       $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+       $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
 
 endif
 
 ifneq ($(wildcard $(C_SRC_DIR)),)
 $(C_SRC_ENV):
-       $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+       $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
                io_lib:format( \
                        \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
                        \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
@@ -5889,7 +6008,7 @@ endif
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
-.PHONY: ct distclean-ct
+.PHONY: ct apps-ct distclean-ct
 
 # Configuration.
 
@@ -5919,22 +6038,44 @@ help::
 CT_RUN = ct_run \
        -no_auto_compile \
        -noinput \
-       -pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(TEST_DIR) \
+       -pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin $(TEST_DIR) \
        -dir $(TEST_DIR) \
        -logdir $(CURDIR)/logs
 
 ifeq ($(CT_SUITES),)
-ct:
+ct: $(if $(IS_APP),,apps-ct)
 else
-ct: test-build
+ct: test-build $(if $(IS_APP),,apps-ct)
        $(verbose) mkdir -p $(CURDIR)/logs/
-       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+       $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1:
+       $(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: test-build $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifndef t
+CT_EXTRA =
+else
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
 endif
 
 define ct_suite_target
 ct-$(1): test-build
        $(verbose) mkdir -p $(CURDIR)/logs/
-       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS)
+       $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
 endef
 
 $(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
@@ -5953,9 +6094,8 @@ DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
 export DIALYZER_PLT
 
 PLT_APPS ?=
-DIALYZER_DIRS ?= --src -r src
-DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
-       -Wunmatched_returns # -Wunderspecs
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
 
 # Core targets.
 
@@ -5971,6 +6111,18 @@ help::
 
 # Plugin-specific targets.
 
+define filter_opts.erl
+       Opts = binary:split(<<"$1">>, <<"-">>, [global]),
+       Filtered = lists:reverse(lists:foldl(fun
+               (O = <<"pa ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"D ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"I ", _/bits>>, Acc) -> [O|Acc];
+               (_, Acc) -> Acc
+       end, [], Opts)),
+       io:format("~s~n", [[["-", O] || O <- Filtered]]),
+       halt().
+endef
+
 $(DIALYZER_PLT): deps app
        $(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
 
@@ -5984,7 +6136,7 @@ dialyze:
 else
 dialyze: $(DIALYZER_PLT)
 endif
-       $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+       $(verbose) dialyzer --no_native `$(call erlang,$(call filter_opts.erl,$(ERLC_OPTS)))` $(DIALYZER_DIRS) $(DIALYZER_OPTS)
 
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5997,58 +6149,20 @@ EDOC_OPTS ?=
 
 # Core targets.
 
-docs:: distclean-edoc edoc
+ifneq ($(wildcard doc/overview.edoc),)
+docs:: edoc
+endif
 
 distclean:: distclean-edoc
 
 # Plugin-specific targets.
 
-edoc: doc-deps
+edoc: distclean-edoc doc-deps
        $(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().'
 
 distclean-edoc:
        $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info
 
-# Copyright (c) 2015, Erlang Solutions Ltd.
-# This file is part of erlang.mk and subject to the terms of the ISC License.
-
-.PHONY: elvis distclean-elvis
-
-# Configuration.
-
-ELVIS_CONFIG ?= $(CURDIR)/elvis.config
-
-ELVIS ?= $(CURDIR)/elvis
-export ELVIS
-
-ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
-ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
-ELVIS_OPTS ?=
-
-# Core targets.
-
-help::
-       $(verbose) printf "%s\n" "" \
-               "Elvis targets:" \
-               "  elvis       Run Elvis using the local elvis.config or download the default otherwise"
-
-distclean:: distclean-elvis
-
-# Plugin-specific targets.
-
-$(ELVIS):
-       $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
-       $(verbose) chmod +x $(ELVIS)
-
-$(ELVIS_CONFIG):
-       $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
-
-elvis: $(ELVIS) $(ELVIS_CONFIG)
-       $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
-
-distclean-elvis:
-       $(gen_verbose) rm -rf $(ELVIS)
-
 # Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
@@ -6057,6 +6171,8 @@ distclean-elvis:
 # Configuration.
 
 ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
 ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
 
 ESCRIPT_BEAMS ?= "ebin/*", "deps/*/ebin/*"
@@ -6102,7 +6218,7 @@ define ESCRIPT_RAW
 '  ]),'\
 '  file:change_mode(Escript, 8#755)'\
 'end,'\
-'Ez("$(ESCRIPT_NAME)"),'\
+'Ez("$(ESCRIPT_FILE)"),'\
 'halt().'
 endef
 
@@ -6114,6 +6230,75 @@ escript:: distclean-escript deps app
 distclean-escript:
        $(gen_verbose) rm -f $(ESCRIPT_NAME)
 
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "EUnit targets:" \
+               "  eunit       Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+       case "$(COVER)" of
+               "" -> ok;
+               _ ->
+                       case cover:compile_beam_directory("ebin") of
+                               {error, _} -> halt(1);
+                               _ -> ok
+                       end
+       end,
+       case eunit:test($1, [$(EUNIT_OPTS)]) of
+               ok -> ok;
+               error -> halt(2)
+       end,
+       case "$(COVER)" of
+               "" -> ok;
+               _ ->
+                       cover:export("eunit.coverdata")
+       end,
+       halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+       $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP),,apps-eunit)
+       $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit:
+       $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; done
+endif
+endif
+
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
@@ -6124,7 +6309,7 @@ distclean-escript:
 RELX ?= $(CURDIR)/relx
 RELX_CONFIG ?= $(CURDIR)/relx.config
 
-RELX_URL ?= https://github.com/erlware/relx/releases/download/v3.5.0/relx
+RELX_URL ?= https://github.com/erlware/relx/releases/download/v3.19.0/relx
 RELX_OPTS ?=
 RELX_OUTPUT_DIR ?= _rel
 
@@ -6392,7 +6577,8 @@ define cover_report.erl
                true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
        TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
        TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
-       TotalPerc = round(100 * TotalY / (TotalY + TotalN)),
+       Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+       TotalPerc = Perc(TotalY, TotalN),
        {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
        io:format(F, "<!DOCTYPE html><html>~n"
                "<head><meta charset=\"UTF-8\">~n"
@@ -6402,7 +6588,7 @@ define cover_report.erl
        io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
        [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
                "<td>~p%</td></tr>~n",
-               [M, M, round(100 * Y / (Y + N))]) || {M, {Y, N}} <- Report1],
+               [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
        How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
        Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
        io:format(F, "</table>~n"
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 6f089df098cc858f0e22f5b3b3efa608f48cc96a..cff8495244da602d2d770286276a1df8d221eae8 100644 (file)
 
 -include("rabbit_amqp1_0.hrl").
 
--ifdef(use_specs).
--spec(generate/1 :: (tuple()) -> iolist()).
--spec(build_frame/2 :: (int(), iolist()) -> iolist()).
--endif.
+-spec generate(tuple()) -> iolist().
+-spec build_frame(integer(), iolist()) -> iolist().
 
 -define(AMQP_FRAME_TYPE, 0).
 -define(DOFF, 2).
index 95c80778f2e7a443718cd98d2a52394a52311597..5a9f49f31158cd76a9aaf63d58a0cc5d9b9a0aa6 100644 (file)
@@ -20,9 +20,7 @@
 
 -include("rabbit_amqp1_0.hrl").
 
--ifdef(use_specs).
--spec(parse/1 :: (binary()) -> tuple()).
--endif.
+-spec parse(binary()) -> tuple().
 
 parse_all(ValueBin) when is_binary(ValueBin) ->
     lists:reverse(parse_all([], parse(ValueBin))).
index 5b423fff7de8e5f9e2ebc9ce55708b62c3a2b202..85e003de6f7b3f635aa796baa45fb01c2597b82d 100644 (file)
@@ -39,8 +39,12 @@ outcomes(Source) ->
                           _         -> DO
                       end,
                 Os1 = case Os of
-                          undefined -> ?OUTCOMES;
-                          _         -> Os
+                          undefined    -> ?OUTCOMES;
+                          {list, Syms} -> Syms;
+                          Bad1         -> rabbit_amqp1_0_util:protocol_error(
+                                            ?V_1_0_AMQP_ERROR_NOT_IMPLEMENTED,
+                                            "Outcomes not supported: ~p",
+                                            [Bad1])
                       end,
                 {DO1, Os1};
             _ ->
index 7cf7900c0f1458b19b9a3dc6d9019a50493267d0..ee09d76c62be6492fe56e616ac606c9b4095c555 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([start_link_args/0]).
 
--type(start_link_args() ::
+-type start_link_args() ::
         {rabbit_types:protocol(), rabbit_net:socket(),
          rabbit_channel:channel_number(), non_neg_integer(), pid(),
-         rabbit_access_control:username(), rabbit_types:vhost(), pid()}).
-
--spec(start_link/1 :: (start_link_args()) -> {'ok', pid(), pid()}).
-
--endif.
+         rabbit_access_control:username(), rabbit_types:vhost(), pid()}.
 
+-spec start_link(start_link_args()) -> {'ok', pid(), pid()}.
 
 %%----------------------------------------------------------------------------
 start_link({rabbit_amqp1_0_framing, Sock, Channel, FrameMax, ReaderPid,
@@ -50,14 +45,14 @@ start_link({rabbit_amqp1_0_framing, Sock, Channel, FrameMax, ReaderPid,
           {writer, {rabbit_amqp1_0_writer, start_link,
                     [Sock, Channel, FrameMax, rabbit_amqp1_0_framing,
                      ReaderPid]},
-           intrinsic, ?MAX_WAIT, worker, [rabbit_amqp1_0_writer]}),
+           intrinsic, ?WORKER_WAIT, worker, [rabbit_amqp1_0_writer]}),
     {ok, ChannelPid} =
         supervisor2:start_child(
           SupPid,
           {channel, {rabbit_amqp1_0_session_process, start_link,
                      [{Channel, ReaderPid, WriterPid, Username, VHost, FrameMax,
                        adapter_info(Sock), Collector}]},
-           intrinsic, ?MAX_WAIT, worker, [rabbit_amqp1_0_session_process]}),
+           intrinsic, ?WORKER_WAIT, worker, [rabbit_amqp1_0_session_process]}),
     {ok, SupPid, ChannelPid}.
 
 %%----------------------------------------------------------------------------
index 1e1eaa50e7b2018922b9a57e44b022945fd9815c..4fff8b47a2a275fcacb2c96aedc4353a79c5108d 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_session/2 :: (pid(), rabbit_amqp1_0_session_sup:start_link_args()) ->
-                              {'ok', pid(), pid()}).
-
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec start_session(pid(), rabbit_amqp1_0_session_sup:start_link_args()) ->
+                              {'ok', pid(), pid()}.
 
 %%----------------------------------------------------------------------------
 
index 1e608db45999a543d4eb7e53c4d5b7b28321641b..7c6786f8aa0ea2317e301fe2aa8a9d278b9bf1a8 100644 (file)
 -export([protocol_error/3]).
 -export([serial_add/2, serial_compare/2, serial_diff/2]).
 
--ifdef(use_specs).
-
 -export_type([serial_number/0]).
--type(serial_number() :: non_neg_integer()).
--type(serial_compare_result() :: 'equal' | 'less' | 'greater').
-
--spec(serial_add/2 :: (serial_number(), non_neg_integer()) ->
-             serial_number()).
--spec(serial_compare/2 :: (serial_number(), serial_number()) ->
-             serial_compare_result()).
--spec(serial_diff/2 :: (serial_number(), serial_number()) ->
-             integer()).
-
--endif.
+-type serial_number() :: non_neg_integer().
+-type serial_compare_result() :: 'equal' | 'less' | 'greater'.
 
+-spec serial_add(serial_number(), non_neg_integer()) ->
+             serial_number().
+-spec serial_compare(serial_number(), serial_number()) ->
+             serial_compare_result().
+-spec serial_diff(serial_number(), serial_number()) ->
+             integer().
 
 protocol_error(Condition, Msg, Args) ->
     exit(#'v1_0.error'{
index e70f72881001ebc1558cbf9e9c660db86e0030e9..03b02fb9a4707a6deb6347b9e7c9d515ab7bdb7c 100644 (file)
 
 %%---------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start/5 ::
+-spec start
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          non_neg_integer(), rabbit_types:protocol(), pid())
-        -> rabbit_types:ok(pid())).
--spec(start_link/5 ::
+        -> rabbit_types:ok(pid()).
+-spec start_link
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          non_neg_integer(), rabbit_types:protocol(), pid())
-        -> rabbit_types:ok(pid())).
--spec(start/6 ::
+        -> rabbit_types:ok(pid()).
+-spec start
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          non_neg_integer(), rabbit_types:protocol(), pid(), boolean())
-        -> rabbit_types:ok(pid())).
--spec(start_link/6 ::
+        -> rabbit_types:ok(pid()).
+-spec start_link
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          non_neg_integer(), rabbit_types:protocol(), pid(), boolean())
-        -> rabbit_types:ok(pid())).
--spec(send_command/2 ::
-        (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(send_command/3 ::
+        -> rabbit_types:ok(pid()).
+-spec send_command
+        (pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command
         (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
-        -> 'ok').
--spec(send_command_sync/2 ::
-        (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(send_command_sync/3 ::
+        -> 'ok'.
+-spec send_command_sync
+        (pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_sync
         (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
-        -> 'ok').
--spec(send_command_and_notify/4 ::
+        -> 'ok'.
+-spec send_command_and_notify
         (pid(), pid(), pid(), rabbit_framing:amqp_method_record())
-        -> 'ok').
--spec(send_command_and_notify/5 ::
+        -> 'ok'.
+-spec send_command_and_notify
         (pid(), pid(), pid(), rabbit_framing:amqp_method_record(),
          rabbit_types:content())
-        -> 'ok').
--spec(internal_send_command/4 ::
+        -> 'ok'.
+-spec internal_send_command
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          rabbit_framing:amqp_method_record(), rabbit_types:protocol())
-        -> 'ok').
--spec(internal_send_command/6 ::
+        -> 'ok'.
+-spec internal_send_command
         (rabbit_net:socket(), rabbit_channel:channel_number(),
          rabbit_framing:amqp_method_record(), rabbit_types:content(),
          non_neg_integer(), rabbit_types:protocol())
-        -> 'ok').
-
--endif.
+        -> 'ok'.
 
 %%---------------------------------------------------------------------------
 
index 63c98b00f5cdcfb4e55ed23888a1289326e74a48..9e12bc819d16dc39c377cda136d67a3bdf56366f 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_amqp1_0,
  [{description, "AMQP 1.0 support for RabbitMQ"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {env, [{default_user, "guest"},
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/lib-java/junit.jar b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/lib-java/junit.jar
deleted file mode 100644 (file)
index 674d71e..0000000
Binary files a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/lib-java/junit.jar and /dev/null differ
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/proton/Makefile b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/proton/Makefile
deleted file mode 100644 (file)
index 0266a00..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-PROTON_VER=0.7
-PROTON_DIR=qpid-proton-$(PROTON_VER)
-PROTON_TARBALL=$(PROTON_DIR).tar.gz
-PROTON_URL=http://www.mirrorservice.org/sites/ftp.apache.org/qpid/proton/$(PROTON_VER)/$(PROTON_TARBALL)
-
-.PHONY: test
-
-test: build/lib
-       ant test
-
-build/lib: $(PROTON_TARBALL)
-       mkdir -p build/tmp
-       tar xvz -C build/tmp -f $(PROTON_TARBALL)
-       cd build/tmp/$(PROTON_DIR)/proton-j && mvn package
-       mkdir -p build/lib
-       cp build/tmp/$(PROTON_DIR)/proton-j/target/proton-j-$(PROTON_VER).jar build/lib
-       cp ../lib-java/*.jar build/lib
-
-clean:
-       rm -rf build $(PROTON_TARBALL)
-
-$(PROTON_TARBALL):
-       wget $(PROTON_URL)
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/proton/build.xml b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/proton/build.xml
deleted file mode 100644 (file)
index a5c50d4..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<project name="RabbitMQ AMQP 1.0 tests using Proton client" default="test">
-    <target name="test-build">
-        <mkdir dir="build/classes"/>
-
-        <javac srcdir="test" destdir="build/classes" debug="true">
-            <classpath>
-                <fileset dir="build/lib">
-                    <include name="**/*.jar"/>
-                </fileset>
-            </classpath>
-        </javac>
-    </target>
-
-    <target name="test" depends="test-build">
-        <mkdir dir="build/test-output"/>
-
-        <junit printSummary="withOutAndErr" fork="yes" failureproperty="test.failed">
-            <classpath>
-                <fileset dir="build/lib">
-                    <include name="**/*.jar"/>
-                </fileset>
-                <pathelement location="build/classes"/>
-            </classpath>
-            <formatter type="plain"/>
-            <test todir="build/test-output" name="com.rabbitmq.amqp1_0.tests.proton.ProtonTests"/>
-        </junit>
-        <fail message="Tests failed" if="test.failed" />
-    </target>
-</project>
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/proton/test/com/rabbitmq/amqp1_0/tests/proton/ProtonTests.java b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/proton/test/com/rabbitmq/amqp1_0/tests/proton/ProtonTests.java
deleted file mode 100644 (file)
index a375900..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-package com.rabbitmq.amqp1_0.tests.proton;
-
-import junit.framework.TestCase;
-import org.apache.qpid.proton.amqp.Binary;
-import org.apache.qpid.proton.amqp.messaging.Data;
-import org.apache.qpid.proton.message.Message;
-import org.apache.qpid.proton.message.impl.MessageImpl;
-import org.apache.qpid.proton.messenger.Messenger;
-import org.apache.qpid.proton.messenger.impl.MessengerImpl;
-
-public class ProtonTests extends TestCase {
-    public static final String ADDRESS = "amqp://localhost/amqp-1.0-test";
-    // This uses deprecated classes, yes. I took them from the examples provided...
-
-    public void testRoundTrip() throws Exception {
-        Messenger mng = new MessengerImpl();
-        mng.start();
-        Message msg = new MessageImpl();
-        msg.setAddress(ADDRESS);
-        msg.setSubject("hello");
-        msg.setContentType("application/octet-stream");
-        msg.setBody(new Data(new Binary("hello world".getBytes())));
-        mng.put(msg);
-        mng.send();
-
-        mng.subscribe(ADDRESS);
-        mng.recv();
-        Message msg2 = mng.get();
-        assertEquals(msg.getSubject(), msg2.getSubject());
-        assertEquals(msg.getContentType(), msg2.getContentType());
-        assertEquals(msg.getBody().toString(), msg2.getBody().toString());
-        mng.stop();
-    }
-}
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/Makefile b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/Makefile
deleted file mode 100644 (file)
index 3963a3d..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-CLIENT_DIR=swiftmq_9_7_1_client
-CLIENT_PKG=$(CLIENT_DIR).tar.gz
-
-.PHONY: test
-
-test: build/lib
-       $(ANT) test
-
-build/lib: $(CLIENT_PKG)
-       mkdir -p build/tmp
-       tar -zx -f $(CLIENT_PKG) -C build/tmp
-       mkdir -p build/lib
-       mv build/tmp/$(CLIENT_DIR)/jars/*.jar build/lib
-       rm -rf build/tmp
-       cp ../lib-java/*.jar build/lib
-       (cd $(DEPS_DIR)/rabbitmq_java_client && ant dist)
-       cp $(DEPS_DIR)/rabbitmq_java_client/build/dist/rabbitmq-client.jar build/lib
-
-$(CLIENT_PKG):
-       @echo
-       @echo You need $(CLIENT_PKG) to run these tests. Unfortunately we can\'t
-       @echo redistribute it. Obtain it from the SwiftMQ website and place it
-       @echo in $(shell pwd).
-       @echo
-       @false
-
-clean:
-       rm -rf build
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/build.xml b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/build.xml
deleted file mode 100644 (file)
index 2152708..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<project name="RabbitMQ AMQP 1.0 tests using SwiftMQ client" default="test">
-    <target name="test-build">
-        <mkdir dir="build/classes"/>
-
-        <javac srcdir="test" destdir="build/classes" debug="true">
-            <classpath>
-                <fileset dir="build/lib">
-                    <include name="**/*.jar"/>
-                </fileset>
-            </classpath>
-        </javac>
-    </target>
-
-    <target name="test" depends="test-build">
-        <mkdir dir="build/test-output"/>
-
-        <junit printSummary="withOutAndErr" fork="yes" failureproperty="test.failed">
-            <classpath>
-                <fileset dir="build/lib">
-                    <include name="**/*.jar"/>
-                </fileset>
-                <pathelement location="build/classes"/>
-            </classpath>
-            <formatter type="plain"/>
-            <test todir="build/test-output" name="com.rabbitmq.amqp1_0.tests.swiftmq.SwiftMQTests"/>
-        </junit>
-        <fail message="Tests failed" if="test.failed" />
-    </target>
-</project>
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/run-tests.sh b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/run-tests.sh
deleted file mode 100755 (executable)
index b055576..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh -e
-${MAKE:-make} -C $(dirname $0) test
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/test/com/rabbitmq/amqp1_0/tests/swiftmq/SwiftMQTests.java b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/swiftmq/test/com/rabbitmq/amqp1_0/tests/swiftmq/SwiftMQTests.java
deleted file mode 100644 (file)
index a44fb48..0000000
+++ /dev/null
@@ -1,404 +0,0 @@
-package com.rabbitmq.amqp1_0.tests.swiftmq;
-
-import com.rabbitmq.client.*;
-import com.swiftmq.amqp.AMQPContext;
-import com.swiftmq.amqp.v100.client.*;
-import com.swiftmq.amqp.v100.client.Connection;
-import com.swiftmq.amqp.v100.client.Consumer;
-import com.swiftmq.amqp.v100.generated.messaging.message_format.*;
-import com.swiftmq.amqp.v100.generated.messaging.message_format.Properties;
-import com.swiftmq.amqp.v100.messaging.AMQPMessage;
-import com.swiftmq.amqp.v100.types.*;
-import junit.framework.TestCase;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.math.BigDecimal;
-import java.util.*;
-
-public class SwiftMQTests extends TestCase {
-    private static final String host = "localhost";
-    private static final int port = 5672;
-    private static final int INBOUND_WINDOW = 100;
-    private static final int OUTBOUND_WINDOW = 100;
-    private static final int CONSUMER_LINK_CREDIT = 200;
-    private static final String QUEUE = "/queue/test";
-    private static final int RECEIVE_TIMEOUT = 10000; // 10 seconds timeout.
-
-    private AMQPMessage msg() {
-        AMQPMessage m = new AMQPMessage();
-        m.addData(data());
-        return m;
-    }
-
-    private Data data() {
-        return new Data("Hello World".getBytes());
-    }
-
-    public void testRoundTrip() throws Exception {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.connect();
-
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        Producer p = s.createProducer(QUEUE, QoS.AT_LEAST_ONCE);
-        p.send(msg());
-        p.close(); // Settlement happens here
-        Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
-        AMQPMessage m = c.receive(RECEIVE_TIMEOUT);
-        m.accept();
-        assertEquals(1, m.getData().size());
-        assertEquals(data(), m.getData().get(0));
-        conn.close();
-    }
-
-    public void testMessageFragmentation()
-            throws UnsupportedProtocolVersionException, AMQPException, AuthenticationException, IOException {
-        fragmentation(512L,  512);
-        fragmentation(512L,  600);
-        fragmentation(512L,  1024);
-        fragmentation(1024L, 1024);
-    }
-
-    public void fragmentation(long FrameSize, int PayloadSize)
-            throws UnsupportedProtocolVersionException, AMQPException, AuthenticationException, IOException {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.setMaxFrameSize(FrameSize);
-        conn.connect();
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-
-        Producer p = s.createProducer(QUEUE, QoS.AT_LEAST_ONCE);
-        AMQPMessage msg = new AMQPMessage();
-        msg.addData(new Data(new byte [PayloadSize]));
-        p.send(msg);
-        p.close();
-
-        Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
-        AMQPMessage m = c.receive(RECEIVE_TIMEOUT);
-        m.accept();
-        c.close();
-        assertEquals(PayloadSize, m.getData().get(0).getValue().length);
-        conn.close();
-    }
-
-    public void testMessageAnnotations() throws Exception {
-        decorationTest(new DecorationProtocol() {
-            @Override
-            public void decorateMessage(AMQPMessage msg, Map<AMQPString, AMQPType> m) throws IOException {
-                msg.setMessageAnnotations(new MessageAnnotations(m));
-            }
-            @Override
-            public Map<AMQPType, AMQPType> getDecoration(AMQPMessage msg) throws IOException {
-                return msg.getMessageAnnotations().getValue();
-            }
-        }, annotationMap());
-    }
-
-    public void testFooter() throws Exception {
-        decorationTest(new DecorationProtocol() {
-            @Override
-            public void decorateMessage(AMQPMessage msg, Map<AMQPString, AMQPType> m) throws IOException {
-                msg.setFooter(new Footer(m));
-            }
-            @Override
-            public Map<AMQPType, AMQPType> getDecoration(AMQPMessage msg) throws IOException {
-                return msg.getFooter().getValue();
-            }
-        }, annotationMap());
-    }
-
-    public void testDataTypes() throws Exception {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.connect();
-
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        Producer p = s.createProducer(QUEUE, QoS.AT_LEAST_ONCE);
-        AMQPMessage msg = new AMQPMessage();
-
-        List<AMQPType> al = new ArrayList<AMQPType>();
-        al.add(new AMQPBoolean(true));
-        al.add(new AMQPByte(Byte.MAX_VALUE));
-        al.add(new AMQPChar(Character.CURRENCY_SYMBOL));
-        al.add(new AMQPDecimal64(BigDecimal.TEN));
-        al.add(new AMQPDouble(Double.NaN));
-        al.add(new AMQPInt(Integer.MIN_VALUE));
-        al.add(new AMQPNull());
-        al.add(new AMQPString("\uFFF9"));
-        al.add(new AMQPSymbol(new String(new char[256])));
-        al.add(new AMQPTimestamp(Long.MAX_VALUE));
-        al.add(new AMQPUuid(System.currentTimeMillis(), Long.MIN_VALUE));
-        al.add(new AMQPUnsignedShort(0));
-        al.add(new AMQPArray(AMQPBoolean.FALSE.getCode(), new AMQPBoolean[]{}));
-        al.add(new AmqpSequence(new ArrayList<AMQPType>()));
-        AmqpSequence seq = new AmqpSequence(al);
-        AmqpValue val = new AmqpValue(seq);
-        msg.setAmqpValue(val);
-
-        p.send(msg);
-        p.close();
-        Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
-        AMQPMessage recvMsg = c.receive(RECEIVE_TIMEOUT);
-        recvMsg.accept();
-
-        assertEquals(val.getValue().getValueString(), recvMsg.getAmqpValue().getValue().getValueString());
-        conn.close();
-    }
-
-    public void testAtMostOnce() throws Exception {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.connect();
-
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        Producer p = s.createProducer(QUEUE, QoS.AT_MOST_ONCE);
-        p.send(msg());
-        p.close();
-
-        Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_MOST_ONCE, false, null);
-        AMQPMessage m = c.receive(RECEIVE_TIMEOUT);
-        assertTrue(m.isSettled());
-
-        s.close();
-        s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_MOST_ONCE, false, null);
-        assertNull(get(c));
-        conn.close();
-    }
-
-    public void testReject() throws Exception {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.connect();
-
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        Producer p = s.createProducer(QUEUE, QoS.AT_LEAST_ONCE);
-        p.send(msg());
-        p.close();
-
-        Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
-        AMQPMessage m = c.receive(RECEIVE_TIMEOUT);
-        m.reject();
-        assertNull(get(c));
-        conn.close();
-    }
-
-    public void testRedelivery() throws Exception {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.connect();
-
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        Producer p = s.createProducer(QUEUE, QoS.AT_MOST_ONCE);
-        p.send(msg());
-        p.close();
-
-        Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
-        AMQPMessage m1 = c.receive(RECEIVE_TIMEOUT);
-        assertTrue(m1.getHeader().getFirstAcquirer().getValue());
-        assertFalse(m1.isSettled());
-
-        s.close();
-        s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
-        AMQPMessage m2 = c.receive(RECEIVE_TIMEOUT);
-        m2.accept();
-
-        assertTrue(compareMessageData(m1, m2));
-        assertFalse(m2.getHeader().getFirstAcquirer().getValue());
-        assertNull(get(c));
-        conn.close();
-    }
-
-    public void testRouting() throws Exception {
-        route("test",                      QUEUE,                  "",         true);
-        route(QUEUE,                      "test",                  "",         true);
-        route("test",                     "test",                  "",         true);
-
-        route("/topic/#.c.*",              "/topic/a.b.c.d",        "",        true);
-        route("/topic/#.c.*",              "/exchange/amq.topic",   "a.b.c.d", true);
-        route("/exchange/amq.topic/#.y.*", "/topic/w.x.y.z",        "",        true);
-        route("/exchange/amq.topic/#.y.*", "/exchange/amq.topic",   "w.x.y.z", true);
-
-        route("/exchange/amq.fanout/",     "/exchange/amq.fanout",  "",        true);
-        route("/exchange/amq.direct/",     "/exchange/amq.direct",  "",        true);
-        route("/exchange/amq.direct/a",    "/exchange/amq.direct",  "a",       true);
-
-        /* The following three tests rely on the queue "test" created by
-         * previous tests in this function. */
-        route("/amq/queue/test",           QUEUE,                   "",        true);
-        route(QUEUE,                       "/amq/queue/test",       "",        true);
-        route("/amq/queue/test",           "/amq/queue/test",       "",        true);
-
-        /* The following tests verify that a queue created out-of-band in AMQP
-         * is reachable from the AMQP 1.0 world. */
-        ConnectionFactory factory = new ConnectionFactory();
-        com.rabbitmq.client.Connection connection = factory.newConnection();
-        Channel channel = connection.createChannel();
-        channel.queueDeclare("transient_q", false, false, false, null);
-        route("/amq/queue/transient_q",    "/amq/queue/transient_q", "",       true);
-        channel.queueDelete("transient_q");
-        channel.queueDeclare("durable_q", true, false, false, null);
-        route("/amq/queue/durable_q",      "/amq/queue/durable_q",  "",        true);
-        channel.queueDelete("durable_q");
-        channel.queueDeclare("autodel_q", false, false, true, null);
-        route("/amq/queue/autodel_q",      "/amq/queue/autodel_q",  "",        true);
-        channel.queueDelete("autodel_q");
-        connection.close();
-
-        route("/exchange/amq.direct/b",    "/exchange/amq.direct",  "a",       false);
-        route(QUEUE,                       "/exchange/amq.fanout",  "",        false);
-        route(QUEUE,                       "/exchange/amq.headers", "",        false);
-        emptyQueue(QUEUE);
-    }
-
-    public void testRoutingInvalidRoutes() throws Exception {
-        ConnectionFactory factory = new ConnectionFactory();
-        com.rabbitmq.client.Connection connection = factory.newConnection();
-        Channel channel = connection.createChannel();
-        channel.queueDeclare("transient", false, false, false, null);
-        connection.close();
-
-        for (String dest : Arrays.asList("/exchange/missing", "/fruit/orange")) {
-            routeInvalidSource(dest);
-            routeInvalidTarget(dest);
-        }
-    }
-
-    private void emptyQueue(String q) throws Exception {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.connect();
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        Consumer c = s.createConsumer(q, CONSUMER_LINK_CREDIT, QoS.AT_MOST_ONCE, false, null);
-        AMQPMessage m;
-        while ((m = get(c)) != null);
-        conn.close();
-    }
-
-    // Whatever Consumer.receiveNoWait() does, it does not involve the drain
-    // flag, so it's clearly more a case of "have any messages arrived?" rather
-    // than "has the queue got any messages?" Therefore we have an icky timeout
-    // to give the server time to deliver messages. Really we want a way to use
-    // drain...
-    private AMQPMessage get(Consumer c) {
-        return c.receive(100);
-    }
-
-    private void route(String consumerSource, String producerTarget, String routingKey, boolean succeed) throws Exception {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.connect();
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-
-        Consumer c = s.createConsumer(consumerSource, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
-        Producer p = s.createProducer(producerTarget, QoS.AT_LEAST_ONCE);
-        AMQPMessage msg = msg();
-        AmqpValue sentinel = new AmqpValue(new AMQPDouble(Math.random()));
-        msg.setAmqpValue(sentinel);
-        Properties props = new Properties();
-        props.setSubject(new AMQPString(routingKey));
-        msg.setProperties(props);
-        p.send(msg);
-
-        if (succeed) {
-            AMQPMessage m = c.receive(RECEIVE_TIMEOUT);
-            assertNotNull(m);
-            assertEquals(sentinel.getValue().getValueString(), m.getAmqpValue().getValue().getValueString());
-            m.accept();
-        } else {
-            assertNull(get(c));
-        }
-        c.close();
-        p.close();
-        conn.close();
-    }
-
-    private void routeInvalidSource(String consumerSource) throws Exception {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.connect();
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        try {
-            Consumer c = s.createConsumer(consumerSource, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
-            c.close();
-            fail("Source '" + consumerSource + "' should fail");
-        }
-        catch (Exception e) {
-            // no-op
-        }
-        finally {
-            conn.close();
-        }
-    }
-
-    private void routeInvalidTarget(String producerTarget) throws Exception {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.connect();
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        try {
-            Producer p = s.createProducer(producerTarget, QoS.AT_LEAST_ONCE);
-            p.close();
-            fail("Target '" + producerTarget + "' should fail");
-        }
-        catch (Exception e) {
-            // no-op
-        }
-        finally {
-            conn.close();
-        }
-    }
-
-    // TODO: generalise to a comparison of all immutable parts of messages
-    private boolean compareMessageData(AMQPMessage m1, AMQPMessage m2) throws IOException {
-        ByteArrayOutputStream b1 = new ByteArrayOutputStream();
-        ByteArrayOutputStream b2 = new ByteArrayOutputStream();
-
-        m1.getData().get(0).writeContent(new DataOutputStream(b1));
-        m2.getData().get(0).writeContent(new DataOutputStream(b2));
-        return Arrays.equals(b1.toByteArray(), b2.toByteArray());
-    }
-
-    private void decorationTest(DecorationProtocol d, Map<AMQPString, AMQPType> map) throws Exception {
-        AMQPContext ctx = new AMQPContext(AMQPContext.CLIENT);
-        Connection conn = new Connection(ctx, host, port, false);
-        conn.connect();
-        Session s = conn.createSession(INBOUND_WINDOW, OUTBOUND_WINDOW);
-        Producer p = s.createProducer(QUEUE, QoS.AT_LEAST_ONCE);
-        AMQPMessage msg = msg();
-
-        d.decorateMessage(msg, map);
-        p.send(msg);
-        p.close();
-        Consumer c = s.createConsumer(QUEUE, CONSUMER_LINK_CREDIT, QoS.AT_LEAST_ONCE, false, null);
-        AMQPMessage recvMsg = c.receive(RECEIVE_TIMEOUT);
-        recvMsg.accept();
-
-        compareMaps(map, d.getDecoration(recvMsg));
-        conn.close();
-    }
-
-    private void compareMaps(Map<AMQPString, AMQPType> m1, Map<AMQPType, AMQPType> m2){
-        Set e1 = m1.entrySet();
-        Set e2 = m2.entrySet();
-        assertTrue(e1.containsAll(e2));
-        assertTrue(e2.containsAll(e1));
-    }
-
-    private Map<AMQPString, AMQPType> annotationMap() throws IOException {
-        Map<AMQPString, AMQPType> annotations = new HashMap<AMQPString, AMQPType>();
-        // the spec allows keys to be symbol or ulong only, but the library only allows string
-        annotations.put(new AMQPString("key1"), new AMQPString("value1"));
-        annotations.put(new AMQPString("key2"), new AMQPString("value2"));
-        return annotations;
-    }
-
-    private interface DecorationProtocol {
-        void decorateMessage(AMQPMessage msg, Map<AMQPString, AMQPType> m) throws IOException;
-        Map<AMQPType, AMQPType> getDecoration(AMQPMessage _) throws IOException;
-    }
-
-}
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE.erl b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE.erl
new file mode 100644 (file)
index 0000000..85fc6d7
--- /dev/null
@@ -0,0 +1,229 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(system_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-compile(export_all).
+
+all() ->
+    [
+      {group, dotnet},
+      {group, java}
+    ].
+
+groups() ->
+    [
+      {dotnet, [], [
+          roundtrip,
+          default_outcome,
+          outcomes,
+          fragmentation,
+          message_annotations,
+          footer,
+          data_types,
+          %% TODO at_most_once,
+          reject,
+          redelivery,
+          routing,
+          invalid_routes
+        ]},
+      {java, [], [
+          roundtrip,
+          message_annotations,
+          footer
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config.
+
+end_per_suite(Config) ->
+    Config.
+
+init_per_group(Group, Config) ->
+    Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, Suffix},
+        {amqp10_client_library, Group}
+      ]),
+    GroupSetupStep = case Group of
+        dotnet -> fun build_dotnet_test_project/1;
+        java   -> fun build_maven_test_project/1
+    end,
+    rabbit_ct_helpers:run_setup_steps(Config1, [
+        GroupSetupStep
+      ] ++
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_, Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+build_dotnet_test_project(Config) ->
+    TestProjectDir = filename:join(
+      [?config(data_dir, Config), "dotnet-tests"]),
+    Ret = rabbit_ct_helpers:exec(["dotnet", "restore"],
+      [{cd, TestProjectDir}]),
+    case Ret of
+        {ok, _} ->
+            rabbit_ct_helpers:set_config(Config,
+              {dotnet_test_project_dir, TestProjectDir});
+        _ ->
+            {skip, "Failed to fetch .NET Core test project dependencies"}
+    end.
+
+build_maven_test_project(Config) ->
+    TestProjectDir = filename:join([?config(data_dir, Config), "java-tests"]),
+    Ret = rabbit_ct_helpers:exec(["mvn", "test-compile"],
+      [{cd, TestProjectDir}]),
+    case Ret of
+        {ok, _} ->
+            rabbit_ct_helpers:set_config(Config,
+              {maven_test_project_dir, TestProjectDir});
+        _ ->
+            {skip, "Failed to build Maven test project"}
+    end.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+roundtrip(Config) ->
+    run(Config, [
+        {dotnet, "roundtrip"},
+        {java, "RoundTripTest"}
+      ]).
+
+default_outcome(Config) ->
+    run(Config, [
+        {dotnet, "default_outcome"}
+      ]).
+
+outcomes(Config) ->
+    run(Config, [
+        {dotnet, "outcomes"}
+      ]).
+
+fragmentation(Config) ->
+    run(Config, [
+        {dotnet, "fragmentation"}
+      ]).
+
+message_annotations(Config) ->
+    run(Config, [
+        {dotnet, "message_annotations"},
+        {java, "MessageAnnotationsTest"}
+      ]).
+
+footer(Config) ->
+    run(Config, [
+        {dotnet, "footer"},
+        {java, "FooterTest"}
+      ]).
+
+data_types(Config) ->
+    run(Config, [
+        {dotnet, "data_types"}
+      ]).
+
+%% at_most_once(Config) ->
+%%     run(Config, [
+%%       ]).
+
+reject(Config) ->
+    run(Config, [
+        {dotnet, "reject"}
+      ]).
+
+redelivery(Config) ->
+    run(Config, [
+        {dotnet, "redelivery"}
+      ]).
+
+routing(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    amqp_channel:call(Ch, #'queue.declare'{queue   = <<"transient_q">>,
+                                           durable = false}),
+    amqp_channel:call(Ch, #'queue.declare'{queue   = <<"durable_q">>,
+                                           durable = true}),
+    amqp_channel:call(Ch, #'queue.declare'{queue       = <<"autodel_q">>,
+                                           auto_delete = true}),
+    run(Config, [
+        {dotnet, "routing"}
+      ]).
+
+invalid_routes(Config) ->
+    run(Config, [
+        {dotnet, "invalid_routes"}
+      ]).
+
+run(Config, Flavors) ->
+    ClientLibrary = ?config(amqp10_client_library, Config),
+    Fun = case ClientLibrary of
+        dotnet -> fun run_dotnet_test/2;
+        java   -> fun run_java_test/2
+    end,
+    case proplists:get_value(ClientLibrary, Flavors) of
+        false    -> ok;
+        TestName -> Fun(Config, TestName)
+    end.
+
+run_dotnet_test(Config, Method) ->
+    TestProjectDir = ?config(dotnet_test_project_dir, Config),
+    Ret = rabbit_ct_helpers:exec([
+        "dotnet",
+        "test",
+        %% TODO `--params` is not supported by dotnet-test-nunit
+        %% 3.4.0-beta-1.
+        %% "--params", {"rmq_broker_uri=~s",
+        %%   [rabbit_ct_broker_helpers:node_uri(Config, 0)]},
+        "--where", {"method == ~s", [Method]},
+        "--noresult"
+      ],
+      [
+        {cd, TestProjectDir},
+        {env, [
+            {"RMQ_BROKER_URI", rabbit_ct_broker_helpers:node_uri(Config, 0)}
+          ]}
+      ]),
+    {ok, _} = Ret.
+
+run_java_test(Config, Class) ->
+    TestProjectDir = ?config(maven_test_project_dir, Config),
+    Ret = rabbit_ct_helpers:exec([
+        "mvn",
+        "test",
+        {"-Dtest=~s", [Class]},
+        {"-Drmq_broker_uri=~s", [rabbit_ct_broker_helpers:node_uri(Config, 0)]}
+      ],
+      [{cd, TestProjectDir}]),
+    {ok, _} = Ret.
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/dotnet-tests/project.json b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/dotnet-tests/project.json
new file mode 100644 (file)
index 0000000..7073181
--- /dev/null
@@ -0,0 +1,26 @@
+{
+  "version": "1.0.0-*",
+  "buildOptions": {
+    "debugType": "portable",
+    "emitEntryPoint": false
+  },
+
+  "dependencies": {},
+
+  "frameworks": {
+    "netcoreapp1.0": {
+      "dependencies": {
+        "Microsoft.NETCore.App": {
+          "type": "platform",
+          "version": "1.0.0"
+        },
+       "NUnit": "3.4.0",
+       "dotnet-test-nunit": "3.4.0-beta-1",
+       "AmqpNetLite": "1.1.9-rc"
+      },
+      "imports": "dnxcore50"
+    }
+  },
+
+  "testRunner": "nunit"
+}
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/dotnet-tests/src/testsuite.cs b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/dotnet-tests/src/testsuite.cs
new file mode 100644 (file)
index 0000000..c2dfea1
--- /dev/null
@@ -0,0 +1,499 @@
+// vim:sw=4:et:
+
+using NUnit.Framework;
+
+using System;
+using System.Linq;
+using System.Threading;
+
+using Amqp;
+using Amqp.Framing;
+using Amqp.Types;
+
+namespace RabbitMQ.Amqp10
+{
+    [TestFixture]
+    public class Testsuite
+    {
+        [Test]
+        public void roundtrip()
+        {
+            string uri = get_broker_uri();
+
+            Connection connection = new Connection(new Address(uri));
+            Session session = new Session(connection);
+            SenderLink sender = new SenderLink(session,
+              "test-sender", "roundtrip-q");
+
+            Message message1 = new Message("Testing roundtrip");
+            sender.Send(message1);
+
+            ReceiverLink receiver = new ReceiverLink(session,
+              "test-receiver", "roundtrip-q");
+            receiver.SetCredit(100, true);
+            Message message2 = receiver.Receive();
+            receiver.Accept(message2);
+
+            sender.Close();
+            receiver.Close();
+            session.Close();
+            connection.Close();
+
+            Assert.That(message2.GetBody<string>(),
+              Is.EqualTo(message1.GetBody<string>()));
+        }
+
+        [TestCase("amqp:accepted:list", null)]
+        [TestCase("amqp:rejected:list", null)]
+        [TestCase("amqp:released:list", null)]
+        public void default_outcome(string default_outcome, string condition)
+        {
+            Outcome default_outcome_obj = null;
+            switch (default_outcome) {
+                case "amqp:accepted:list":
+                           default_outcome_obj = new Accepted();
+                           break;
+                case "amqp:rejected:list":
+                           default_outcome_obj = new Rejected();
+                           break;
+                case "amqp:released:list":
+                           default_outcome_obj = new Released();
+                           break;
+                case "amqp:modified:list":
+                           default_outcome_obj = new Modified();
+                           break;
+            }
+
+            Attach attach = new Attach() {
+                Source = new Source() {
+                  Address = "default_outcome-q",
+                  DefaultOutcome = default_outcome_obj
+                },
+                Target = new Target()
+              };
+
+            do_test_outcomes(attach, condition);
+        }
+
+        [TestCase("amqp:accepted:list", null)]
+        [TestCase("amqp:rejected:list", null)]
+        [TestCase("amqp:released:list", null)]
+        [TestCase("amqp:modified:list", "amqp:not-implemented")]
+        public void outcomes(string outcome, string condition)
+        {
+            Attach attach = new Attach() {
+                Source = new Source() {
+                  Address = "outcomes-q",
+                  Outcomes = new Symbol[] { new Symbol(outcome) }
+                },
+                Target = new Target()
+              };
+
+            do_test_outcomes(attach, condition);
+        }
+
+        internal void do_test_outcomes(Attach attach, string condition)
+        {
+            string uri = get_broker_uri();
+
+            Connection connection = new Connection(new Address(uri));
+            Session session = new Session(connection);
+
+            ManualResetEvent mre = new ManualResetEvent(false);
+            string error_name = null;
+
+            OnAttached attached = (Link link, Attach _attach) => {
+                error_name = null;
+                mre.Set();
+            };
+
+            ClosedCallback closed = (AmqpObject amqp_obj, Error error) => {
+                error_name = error.Condition;
+                mre.Set();
+            };
+            session.Closed = closed;
+
+            ReceiverLink receiver = new ReceiverLink(session,
+              "test-receiver", attach, attached);
+
+            mre.WaitOne();
+            if (condition == null) {
+                Assert.That(error_name, Is.Null);
+
+                session.Closed = null;
+                receiver.Close();
+                session.Close();
+            } else {
+                Assert.That(error_name, Is.EqualTo(condition));
+            }
+
+            connection.Close();
+        }
+
+        [TestCase(512U, 512U)]
+        [TestCase(512U, 600U)]
+        [TestCase(512U, 1024U)]
+        [TestCase(1024U, 1024U)]
+        public void fragmentation(uint frame_size, uint payload_size)
+        {
+            string uri = get_broker_uri();
+            Address address = new Address(uri);
+
+            Open open = new Open()
+            {
+                ContainerId = Guid.NewGuid().ToString(),
+                HostName = address.Host,
+                ChannelMax = 256,
+                MaxFrameSize = frame_size
+            };
+
+            Connection connection = new Connection(address, null, open, null);
+            Session session = new Session(connection);
+            SenderLink sender = new SenderLink(session,
+              "test-sender", "fragmentation-q");
+
+            Message message1 = new Message(String.Concat(
+              Enumerable.Repeat("a", (int)payload_size)));
+            sender.Send(message1);
+
+            ReceiverLink receiver = new ReceiverLink(session,
+              "test-receiver", "fragmentation-q");
+            receiver.SetCredit(100, true);
+            Message message2 = receiver.Receive();
+            receiver.Accept(message2);
+
+            sender.Close();
+            receiver.Close();
+            session.Close();
+            connection.Close();
+
+            Assert.That(message2.GetBody<string>(),
+              Is.EqualTo(message1.GetBody<string>()));
+        }
+
+        [Test]
+        public void message_annotations()
+        {
+            string uri = get_broker_uri();
+
+            Connection connection = new Connection(new Address(uri));
+            Session session = new Session(connection);
+            SenderLink sender = new SenderLink(session,
+              "test-sender", "annotations-q");
+
+            Message message1 = new Message("Testing message annotations");
+            message1.MessageAnnotations = new MessageAnnotations();
+            message1.MessageAnnotations[new Symbol("key1")] = "value1";
+            message1.MessageAnnotations[new Symbol("key2")] = "value2";
+            sender.Send(message1);
+
+            ReceiverLink receiver = new ReceiverLink(session,
+              "test-receiver", "annotations-q");
+            receiver.SetCredit(100, true);
+            Message message2 = receiver.Receive();
+            receiver.Accept(message2);
+
+            sender.Close();
+            receiver.Close();
+            session.Close();
+            connection.Close();
+
+            Assert.That(message2.GetBody<string>(),
+              Is.EqualTo(message1.GetBody<string>()));
+            Assert.That(message2.MessageAnnotations.Descriptor,
+              Is.EqualTo(message1.MessageAnnotations.Descriptor));
+            Assert.That(message2.MessageAnnotations.Map,
+              Is.EqualTo(message1.MessageAnnotations.Map));
+        }
+
+        [Test]
+        public void footer()
+        {
+            string uri = get_broker_uri();
+
+            Connection connection = new Connection(new Address(uri));
+            Session session = new Session(connection);
+            SenderLink sender = new SenderLink(session,
+              "test-sender", "footer-q");
+
+            Message message1 = new Message("Testing footer");
+            message1.Footer = new Footer();
+            message1.Footer[new Symbol("key1")] = "value1";
+            message1.Footer[new Symbol("key2")] = "value2";
+            sender.Send(message1);
+
+            ReceiverLink receiver = new ReceiverLink(session,
+              "test-receiver", "footer-q");
+            receiver.SetCredit(100, true);
+            Message message2 = receiver.Receive();
+            receiver.Accept(message2);
+
+            sender.Close();
+            receiver.Close();
+            session.Close();
+            connection.Close();
+
+            Assert.That(message2.GetBody<string>(),
+              Is.EqualTo(message1.GetBody<string>()));
+            Assert.That(message2.Footer.Descriptor,
+              Is.EqualTo(message1.Footer.Descriptor));
+            Assert.That(message2.Footer.Map,
+              Is.EqualTo(message1.Footer.Map));
+        }
+
+        [Test]
+        public void data_types()
+        {
+            string uri = get_broker_uri();
+
+            Connection connection = new Connection(new Address(uri));
+            Session session = new Session(connection);
+            SenderLink sender = new SenderLink(session,
+              "test-sender", "data_types-q");
+
+            var list = new Amqp.Types.List();
+            list.Add(true);
+            list.Add('$');
+            list.Add(Byte.MaxValue);
+            list.Add(Int16.MinValue);
+            list.Add(Int32.MinValue);
+            list.Add(Int64.MinValue);
+            list.Add(UInt16.MaxValue);
+            list.Add(UInt32.MaxValue);
+            list.Add(UInt64.MaxValue);
+            list.Add(Double.NaN);
+            list.Add(null);
+            list.Add("\uFFF9");
+            list.Add(new Symbol("Symbol"));
+            list.Add(DateTime.Parse("2008-11-01T19:35:00.0000000Z").ToUniversalTime());
+            list.Add(new Guid("f275ea5e-0c57-4ad7-b11a-b20c563d3b71"));
+            list.Add(new Amqp.Types.List() { "Boolean", true });
+            list.Add(new AmqpSequence() {
+              List = new Amqp.Types.List() { "Integer", 1 }
+            });
+
+            AmqpSequence body = new AmqpSequence() { List = list };
+            Message message1 = new Message(body);
+            sender.Send(message1);
+
+            ReceiverLink receiver = new ReceiverLink(session,
+              "test-receiver", "data_types-q");
+            receiver.SetCredit(100, true);
+            Message message2 = receiver.Receive();
+            receiver.Accept(message2);
+
+            sender.Close();
+            receiver.Close();
+            session.Close();
+            connection.Close();
+
+            /* AmqpSequence apparently can't be compared directly: we must
+             * compare their list instead. */
+            var list1 = message1.GetBody<AmqpSequence>().List;
+            var list2 = message2.GetBody<AmqpSequence>().List;
+            Assert.That(list2.Count, Is.EqualTo(list1.Count));
+
+            for (int i = 0; i < list1.Count; ++i) {
+                if (list[i] != null &&
+                  list1[i].GetType() == typeof(AmqpSequence)) {
+                    Assert.That(
+                      ((AmqpSequence)list2[i]).List,
+                      Is.EqualTo(((AmqpSequence)list1[i]).List));
+                } else {
+                    Assert.That(list2[i], Is.EqualTo(list1[i]));
+                }
+            }
+        }
+
+        [Test]
+        public void reject()
+        {
+            string uri = get_broker_uri();
+
+            Connection connection = new Connection(new Address(uri));
+            Session session = new Session(connection);
+            SenderLink sender = new SenderLink(session,
+              "test-sender", "reject-q");
+
+            Message message1 = new Message("Testing reject");
+            sender.Send(message1);
+            sender.Close();
+
+            ReceiverLink receiver = new ReceiverLink(session,
+              "test-receiver", "reject-q");
+            receiver.SetCredit(100, true);
+            Message message2 = receiver.Receive();
+            receiver.Reject(message2);
+
+            Assert.That(receiver.Receive(100), Is.Null);
+
+            receiver.Close();
+            session.Close();
+            connection.Close();
+        }
+
+        [Test]
+        public void redelivery()
+        {
+            string uri = get_broker_uri();
+
+            Connection connection = new Connection(new Address(uri));
+            Session session = new Session(connection);
+            SenderLink sender = new SenderLink(session,
+              "test-sender", "redelivery-q");
+
+            Message message1 = new Message("Testing redelivery");
+            sender.Send(message1);
+            sender.Close();
+
+            ReceiverLink receiver = new ReceiverLink(session,
+              "test-receiver", "redelivery-q");
+            receiver.SetCredit(100, true);
+            Message message2 = receiver.Receive();
+
+            Assert.That(message2.Header.FirstAcquirer, Is.True);
+            // FIXME Assert.That(message2.Delivery.Settled, Is.False);
+
+            receiver.Close();
+            session.Close();
+
+            session = new Session(connection);
+            receiver = new ReceiverLink(session,
+              "test-receiver", "redelivery-q");
+            receiver.SetCredit(100, true);
+            Message message3 = receiver.Receive();
+            receiver.Accept(message3);
+
+            Assert.That(message3.GetBody<string>(),
+              Is.EqualTo(message2.GetBody<string>()));
+            Assert.That(message3.Header.FirstAcquirer, Is.False);
+
+            Assert.That(receiver.Receive(100), Is.Null);
+
+            receiver.Close();
+            session.Close();
+            connection.Close();
+        }
+
+        [TestCase("/queue/test", "test", "", true)]
+        [TestCase("test", "/queue/test", "", true)]
+        [TestCase("test", "test", "", true)]
+
+        [TestCase("/topic/a.b.c.d",      "/topic/#.c.*",              "",        true)]
+        [TestCase("/exchange/amq.topic", "/topic/#.c.*",              "a.b.c.d", true)]
+        [TestCase("/topic/w.x.y.z",      "/exchange/amq.topic/#.y.*", "",        true)]
+        [TestCase("/exchange/amq.topic", "/exchange/amq.topic/#.y.*", "w.x.y.z", true)]
+
+        [TestCase("/exchange/amq.fanout",  "/exchange/amq.fanout/",  "",  true)]
+        [TestCase("/exchange/amq.direct",  "/exchange/amq.direct/",  "",  true)]
+        [TestCase("/exchange/amq.direct",  "/exchange/amq.direct/a", "a", true)]
+
+        /* FIXME: The following three tests rely on the queue "test"
+         * created by previous tests in this function. */
+        [TestCase("/queue/test",     "/amq/queue/test", "", true)]
+        [TestCase("/amq/queue/test", "/queue/test",     "", true)]
+        [TestCase("/amq/queue/test", "/amq/queue/test", "", true)]
+
+        /* The following tests verify that a queue created out-of-band
+         * in AMQP is reachable from the AMQP 1.0 world. Queues are created
+         * from the common_test suite. */
+        [TestCase("/amq/queue/transient_q", "/amq/queue/transient_q", "", true)]
+        [TestCase("/amq/queue/durable_q",   "/amq/queue/durable_q",   "", true)]
+        [TestCase("/amq/queue/autodel_q",   "/amq/queue/autodel_q",   "", true)]
+
+        public void routing(String target, String source,
+          String routing_key, bool succeed)
+        {
+            string uri = get_broker_uri();
+
+            Connection connection = new Connection(new Address(uri));
+            Session session = new Session(connection);
+            SenderLink sender = new SenderLink(session,
+              "test-sender", target);
+            ReceiverLink receiver = new ReceiverLink(session,
+              "test-receiver", source);
+            receiver.SetCredit(100, true);
+
+            Random rnd = new Random();
+            Message message1 = new Message(rnd.Next(10000));
+            Properties props = new Properties() {
+                Subject = routing_key
+            };
+            message1.Properties = props;
+            sender.Send(message1);
+
+            if (succeed) {
+                Message message2 = receiver.Receive(3000);
+                receiver.Accept(message2);
+                Assert.That(message2, Is.Not.Null);
+                Assert.That(message2.GetBody<int>(),
+                  Is.EqualTo(message1.GetBody<int>()));
+            } else {
+                Message message2 = receiver.Receive(100);
+                Assert.That(message2, Is.Null);
+            }
+
+            sender.Close();
+            receiver.Close();
+            session.Close();
+            connection.Close();
+        }
+
+        [TestCase("/exchange/missing", "amqp:not-found")]
+        [TestCase("/fruit/orange", "amqp:invalid-field")]
+        public void invalid_routes(string dest, string condition)
+        {
+            string uri = get_broker_uri();
+
+            Connection connection = new Connection(new Address(uri));
+            Session session = new Session(connection);
+
+            ManualResetEvent mre = new ManualResetEvent(false);
+            string error_name = null;
+
+            OnAttached attached = delegate(Link link, Attach attach) {
+                mre.Set();
+            };
+
+            ClosedCallback closed = (AmqpObject amqp_obj, Error error) => {
+                error_name = error.Condition;
+                mre.Set();
+            };
+            session.Closed = closed;
+
+            SenderLink sender = new SenderLink(session,
+              "test-sender", new Target() { Address = dest }, attached);
+            mre.WaitOne();
+            Assert.That(error_name, Is.EqualTo(condition));
+
+            error_name = null;
+            mre.Reset();
+
+            Assert.That(
+              () => {
+                  ReceiverLink receiver = new ReceiverLink(session,
+                    "test-receiver", dest);
+                  receiver.Close();
+              },
+              Throws.TypeOf<Amqp.AmqpException>()
+              .With.Property("Error")
+              .With.Property("Condition").EqualTo(new Symbol(condition)));
+
+            session.Closed = null;
+            session.Close();
+            connection.Close();
+        }
+
+        internal string get_broker_uri()
+        {
+            TestParameters parameters = TestContext.Parameters;
+            string uri = parameters["rmq_broker_uri"];
+            if (uri == null)
+                uri =
+                  System.Environment.GetEnvironmentVariable("RMQ_BROKER_URI");
+            Assert.That(uri, Is.Not.Null);
+
+            return uri;
+        }
+    }
+}
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/pom.xml b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/pom.xml
new file mode 100644 (file)
index 0000000..53f533e
--- /dev/null
@@ -0,0 +1,38 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>com.rabbitmq.amqp1_0.tests.proton</groupId>
+  <artifactId>rabbitmq-amqp1.0-java-tests</artifactId>
+  <packaging>jar</packaging>
+  <version>1.0-SNAPSHOT</version>
+  <name>rabbitmq-amqp1.0-java-tests</name>
+  <url>http://www.rabbitmq.com</url>
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.12</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.qpid</groupId>
+      <artifactId>proton-j</artifactId>
+      <version>0.13.0</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <version>2.19.1</version>
+        <configuration>
+          <systemPropertyVariables>
+            <rmq_broker_uri>${rmq_broker_uri}</rmq_broker_uri>
+          </systemPropertyVariables>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/proton/FooterTest.java b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/proton/FooterTest.java
new file mode 100644 (file)
index 0000000..8cb85a6
--- /dev/null
@@ -0,0 +1,91 @@
+// vim:sw=4:et:
+
+package com.rabbitmq.amqp1_0.tests.proton;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import org.apache.qpid.proton.message.Message;
+import org.apache.qpid.proton.message.impl.MessageImpl;
+import org.apache.qpid.proton.messenger.Messenger;
+import org.apache.qpid.proton.messenger.impl.MessengerImpl;
+import org.apache.qpid.proton.amqp.messaging.AmqpValue;
+import org.apache.qpid.proton.amqp.messaging.Footer;
+import org.apache.qpid.proton.amqp.Symbol;
+
+import java.util.Map;
+import java.util.HashMap;
+
+/**
+ * Unit test for simple App.
+ */
+public class FooterTest
+    extends TestCase
+{
+    public static final String ADDRESS = "/footer-q";
+    public static final String PAYLOAD = "Payload";
+
+    /**
+     * Create the test case
+     *
+     * @param testName name of the test case
+     */
+    public FooterTest(String testName)
+    {
+        super(testName);
+    }
+
+    /**
+     * @return the suite of tests being tested
+     */
+    public static Test suite()
+    {
+        return new TestSuite(FooterTest.class);
+    }
+
+    public void test_footer()
+    {
+        String uri = System.getProperty("rmq_broker_uri");
+        assertNotNull(uri);
+        String address = uri + ADDRESS;
+
+        Messenger mng = new MessengerImpl();
+        Message sent_msg, received_msg;
+
+        mng.setTimeout(1000);
+        try {
+            mng.start();
+        } catch (Exception e) {
+            fail();
+        }
+
+        sent_msg = new MessageImpl();
+        sent_msg.setAddress(address);
+        sent_msg.setBody(new AmqpValue(PAYLOAD));
+
+        Map<Symbol, Object> map = new HashMap<Symbol, Object>();
+        map.put(Symbol.valueOf("key1"), "value1");
+        map.put(Symbol.valueOf("key2"), "value2");
+        Footer annotations = new Footer(map);
+        sent_msg.setFooter(annotations);
+
+        mng.put(sent_msg);
+        mng.send();
+
+        mng.subscribe(address);
+        mng.recv();
+        received_msg = mng.get();
+
+        assertEquals(sent_msg.getSubject(),
+          received_msg.getSubject());
+        assertEquals(sent_msg.getContentType(),
+          received_msg.getContentType());
+        assertEquals(sent_msg.getBody().toString(),
+          received_msg.getBody().toString());
+        assertEquals(sent_msg.getFooter().toString(),
+          received_msg.getFooter().toString());
+
+        mng.stop();
+    }
+}
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/proton/MessageAnnotationsTest.java b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/proton/MessageAnnotationsTest.java
new file mode 100644 (file)
index 0000000..d056bc3
--- /dev/null
@@ -0,0 +1,91 @@
+// vim:sw=4:et:
+
+package com.rabbitmq.amqp1_0.tests.proton;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import org.apache.qpid.proton.message.Message;
+import org.apache.qpid.proton.message.impl.MessageImpl;
+import org.apache.qpid.proton.messenger.Messenger;
+import org.apache.qpid.proton.messenger.impl.MessengerImpl;
+import org.apache.qpid.proton.amqp.messaging.AmqpValue;
+import org.apache.qpid.proton.amqp.messaging.MessageAnnotations;
+import org.apache.qpid.proton.amqp.Symbol;
+
+import java.util.Map;
+import java.util.HashMap;
+
+/**
+ * Unit test for simple App.
+ */
+public class MessageAnnotationsTest
+    extends TestCase
+{
+    public static final String ADDRESS = "/message_annotations-q";
+    public static final String PAYLOAD = "Payload";
+
+    /**
+     * Create the test case
+     *
+     * @param testName name of the test case
+     */
+    public MessageAnnotationsTest(String testName)
+    {
+        super(testName);
+    }
+
+    /**
+     * @return the suite of tests being tested
+     */
+    public static Test suite()
+    {
+        return new TestSuite(MessageAnnotationsTest.class);
+    }
+
+    public void test_message_annotations()
+    {
+        String uri = System.getProperty("rmq_broker_uri");
+        assertNotNull(uri);
+        String address = uri + ADDRESS;
+
+        Messenger mng = new MessengerImpl();
+        Message sent_msg, received_msg;
+
+        mng.setTimeout(1000);
+        try {
+            mng.start();
+        } catch (Exception e) {
+            fail();
+        }
+
+        sent_msg = new MessageImpl();
+        sent_msg.setAddress(address);
+        sent_msg.setBody(new AmqpValue(PAYLOAD));
+
+        Map<Symbol, Object> map = new HashMap<Symbol, Object>();
+        map.put(Symbol.valueOf("key1"), "value1");
+        map.put(Symbol.valueOf("key2"), "value2");
+        MessageAnnotations annotations = new MessageAnnotations(map);
+        sent_msg.setMessageAnnotations(annotations);
+
+        mng.put(sent_msg);
+        mng.send();
+
+        mng.subscribe(address);
+        mng.recv();
+        received_msg = mng.get();
+
+        assertEquals(sent_msg.getSubject(),
+          received_msg.getSubject());
+        assertEquals(sent_msg.getContentType(),
+          received_msg.getContentType());
+        assertEquals(sent_msg.getBody().toString(),
+          received_msg.getBody().toString());
+        assertEquals(sent_msg.getMessageAnnotations().toString(),
+          received_msg.getMessageAnnotations().toString());
+
+        mng.stop();
+    }
+}
diff --git a/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/proton/RoundTripTest.java b/rabbitmq-server/deps/rabbitmq_amqp1_0/test/system_SUITE_data/java-tests/src/test/java/com/rabbitmq/amqp1_0/tests/proton/RoundTripTest.java
new file mode 100644 (file)
index 0000000..16d82da
--- /dev/null
@@ -0,0 +1,77 @@
+// vim:sw=4:et:
+
+package com.rabbitmq.amqp1_0.tests.proton;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import org.apache.qpid.proton.message.Message;
+import org.apache.qpid.proton.message.impl.MessageImpl;
+import org.apache.qpid.proton.messenger.Messenger;
+import org.apache.qpid.proton.messenger.impl.MessengerImpl;
+import org.apache.qpid.proton.amqp.messaging.AmqpValue;
+
+/**
+ * Unit test for simple App.
+ */
+public class RoundTripTest
+    extends TestCase
+{
+    public static final String ADDRESS = "/roundtrip-q";
+    public static final String PAYLOAD = "Payload";
+
+    /**
+     * Create the test case
+     *
+     * @param testName name of the test case
+     */
+    public RoundTripTest(String testName)
+    {
+        super(testName);
+    }
+
+    /**
+     * @return the suite of tests being tested
+     */
+    public static Test suite()
+    {
+        return new TestSuite(RoundTripTest.class);
+    }
+
+    public void test_roundtrip()
+    {
+        String uri = System.getProperty("rmq_broker_uri");
+        assertNotNull(uri);
+        String address = uri + ADDRESS;
+
+        Messenger mng = new MessengerImpl();
+        Message sent_msg, received_msg;
+
+        mng.setTimeout(1000);
+        try {
+            mng.start();
+        } catch (Exception e) {
+            fail();
+        }
+
+        sent_msg = new MessageImpl();
+        sent_msg.setAddress(address);
+        sent_msg.setBody(new AmqpValue(PAYLOAD));
+        mng.put(sent_msg);
+        mng.send();
+
+        mng.subscribe(address);
+        mng.recv();
+        received_msg = mng.get();
+
+        assertEquals(sent_msg.getSubject(),
+          received_msg.getSubject());
+        assertEquals(sent_msg.getContentType(),
+          received_msg.getContentType());
+        assertEquals(sent_msg.getBody().toString(),
+          received_msg.getBody().toString());
+
+        mng.stop();
+    }
+}
similarity index 84%
rename from rabbitmq-server/deps/rabbitmq_amqp1_0/test/src/rabbit_amqp1_0_test.erl
rename to rabbitmq-server/deps/rabbitmq_amqp1_0/test/unit_SUITE.erl
index 211a6d5e88d73e071408eebff3bf1fb7def8fe12..83ac8a9162ad55c3415a758bf04abf8d52cb5d38 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_amqp1_0_test).
+-module(unit_SUITE).
 
--include("rabbit_amqp1_0.hrl").
+-include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 
+-include("rabbit_amqp1_0.hrl").
+
 -import(rabbit_amqp1_0_util, [serial_add/2, serial_diff/2, serial_compare/2]).
 
-serial_arithmetic_test() ->
+-compile(export_all).
+
+all() ->
+    [
+      serial_arithmetic
+    ].
+
+-include_lib("eunit/include/eunit.hrl").
+
+serial_arithmetic(_Config) ->
     ?assertEqual(1, serial_add(0, 1)),
     ?assertEqual(16#7fffffff, serial_add(0, 16#7fffffff)),
     ?assertEqual(0, serial_add(16#ffffffff, 1)),
@@ -34,5 +45,4 @@ serial_arithmetic_test() ->
     ?assertExit({indeterminate_serial_diff, _, _},
                 serial_diff(0, 16#80000000)),
     ?assertExit({indeterminate_serial_diff, _, _},
-                serial_diff(16#ffffffff, 16#7fffffff)),
-    passed.
+                serial_diff(16#ffffffff, 16#7fffffff)).
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index 25e042a3666fd9e7d24b856281b8e5870cd1fbfc..f11c306a0b448217d3efd0a42b59859f44dd74a0 100644 (file)
@@ -1,6 +1,8 @@
 PROJECT = rabbitmq_auth_backend_ldap
 
 DEPS = amqp_client
+TEST_DEPS = rabbit amqp_client ct_helper
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -11,19 +13,5 @@ ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
 ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
 include erlang.mk
-
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
-
-ifneq ($(filter tests tests-with-broker test,$(MAKECMDGOALS)),)
-ifeq ($(shell nc -z localhost 389 && echo true),true)
-WITH_BROKER_TEST_MAKEVARS := \
-       RABBITMQ_CONFIG_FILE=$(CURDIR)/etc/rabbit-test
-WITH_BROKER_TEST_COMMANDS := \
-       eunit:test([rabbit_auth_backend_ldap_unit_test,rabbit_auth_backend_ldap_test],[verbose])
-else
-$(info Skipping LDAP tests; no LDAP server found on localhost)
-endif
-endif
index 423adda20a7b58e84a772a3fda3d67de6c756975..5196276fa1445fc8594df2226ca75c891dc7c978 100644 (file)
@@ -1,16 +1,16 @@
 # Running LDAP Backend Tests
 
-The tests *require* a locally installed LDAP server with some
-predefined objects inside. If there's no LDAP server running on port
-389, they will be skipped.
+If you have [Vagrant](https://www.vagrantup.com) installed you
+can simply `vagrant up` from the root of the project directory.
+This will start a vagrant box with OpenLDAP running, accessible
+on local port 3890.
+Alternatively run OpenLDAP locally on port 3890 and use
+`example/setup.sh` to create the appropriate ldap databases.
 
-On a Debian-based distro you can set up a LDAP server
-and run the tests with:
+IMPORTANT: this will wipe out your local OpenLDAP installation!
+The setup script currently needs to be executed between test suite runs,
+too.
 
-  ./example/setup.sh
-   make tests
+The test setup will seed the LDAP database with the required objects.
 
-but be aware that this will wipe out your local OpenLDAP installation.
-
-Poke around in example/ if using any other distro, you can probably
-make it work.
+Run `make test` to run the complete test suite.
index 69fea5c60acd715fdec28c4343cfa1a4567ac28b..2cadcd82dbb7129391c974f46b8f2d7e48e58e88 100644 (file)
@@ -3,17 +3,31 @@
 This plugin provides [authentication and authorisation backends](http://rabbitmq.com/access-control.html)
 for RabbitMQ that use LDAP.
 
-## Requirements
+## Installation
 
-You can build and install it like any other plugin (see
-http://www.rabbitmq.com/plugin-development.html).
+This plugin ships with reasonably recent RabbitMQ versions
+(e.g. `3.3.0` or later). Enable it with
+
+    rabbitmq-plugins enable rabbitmq_auth_backend_ldap
 
 ## Documentation
 
 [See LDAP guide](http://www.rabbitmq.com/ldap.html) on rabbitmq.com.
 
-## Limitations
 
-Prior to RabbitMQ 3.6.0, this plugin opened a new LDAP server
-connection for every operation. 3.6.0 and later versions use
-a pool of connections.
+## Building from Source
+
+See [Plugin Development guide](http://www.rabbitmq.com/plugin-development.html).
+
+TL;DR: running
+
+    make dist
+
+will build the plugin and put build artifacts under the `./plugins` directory.
+
+
+## Copyright and License
+
+(c) Pivotal Software Inc, 2007-2016
+
+Released under the MPL, the same license as RabbitMQ.
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/Vagrantfile b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/Vagrantfile
new file mode 100644 (file)
index 0000000..543ff50
--- /dev/null
@@ -0,0 +1,76 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# All Vagrant configuration is done below. The "2" in Vagrant.configure
+# configures the configuration version (we support older styles for
+# backwards compatibility). Please don't change it unless you know what
+# you're doing.
+Vagrant.configure(2) do |config|
+  # The most common configuration options are documented and commented below.
+  # For a complete reference, please see the online documentation at
+  # https://docs.vagrantup.com.
+
+  # Every Vagrant development environment requires a box. You can search for
+  # boxes at https://atlas.hashicorp.com/search.
+  config.vm.box = "ubuntu/trusty64"
+
+  # Disable automatic box update checking. If you disable this, then
+  # boxes will only be checked for updates when the user runs
+  # `vagrant box outdated`. This is not recommended.
+  # config.vm.box_check_update = false
+
+  # Create a forwarded port mapping which allows access to a specific port
+  # within the machine from a port on the host machine. In the example below,
+  # accessing "localhost:8080" will access port 80 on the guest machine.
+  config.vm.network "forwarded_port", guest: 389, host: 3890
+
+  # Create a private network, which allows host-only access to the machine
+  # using a specific IP.
+  # config.vm.network "private_network", ip: "192.168.33.10"
+
+  # Create a public network, which generally matches a bridged network.
+  # Bridged networks make the machine appear as another physical device on
+  # your network.
+  # config.vm.network "public_network"
+
+  # Share an additional folder to the guest VM. The first argument is
+  # the path on the host to the actual folder. The second argument is
+  # the path on the guest to mount the folder. And the optional third
+  # argument is a set of non-required options.
+  # config.vm.synced_folder "../data", "/vagrant_data"
+
+  # Provider-specific configuration so you can fine-tune various
+  # backing providers for Vagrant. These expose provider-specific options.
+  # Example for VirtualBox:
+  #
+  # config.vm.provider "virtualbox" do |vb|
+  #   # Display the VirtualBox GUI when booting the machine
+  #   vb.gui = true
+  #
+  #   # Customize the amount of memory on the VM:
+  #   vb.memory = "1024"
+  # end
+  #
+  # View the documentation for the provider you are using for more
+  # information on available options.
+
+  # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
+  # such as FTP and Heroku are also available. See the documentation at
+  # https://docs.vagrantup.com/v2/push/atlas.html for more information.
+  # config.push.define "atlas" do |push|
+  #   push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
+  # end
+
+  # Enable provisioning with a shell script. Additional provisioners such as
+  # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
+  # documentation for more information about their specific syntax and use.
+  config.vm.provision "shell", inline: "sudo apt-get -y update"
+  config.vm.provision "file", source: "example", destination: "~"
+  config.vm.provision "shell", inline: "/bin/sh /home/vagrant/example/setup.sh"
+  # config.vm.provision "shell", inline: <<-SHELL
+  #   sudo apt-get update
+  #   sudo apt-get --yes install slapd ldap-utils
+  #   sleep(1)
+
+  # SHELL
+end
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/etc/rabbit-test.config b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/etc/rabbit-test.config
deleted file mode 100644 (file)
index b65d9c4..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-%% -*- erlang -*-
-[{rabbit, [{default_vhost, <<"test">>}]},
- {rabbitmq_auth_backend_ldap,
-  [ {servers,            ["localhost"]},
-    {user_dn_pattern,    "cn=${username},ou=People,dc=example,dc=com"},
-    {other_bind,         anon},
-    {use_ssl,            false},
-    {port,               389},
-    {log,                true},
-    {tag_queries,        [{administrator, {constant, false}}]},
-    {vhost_access_query, {exists, "ou=${vhost},ou=vhosts,dc=example,dc=com"}},
-    {resource_access_query,
-     {for, [{resource, exchange,
-             {for, [{permission, configure,
-                     {in_group, "cn=wheel,ou=groups,dc=example,dc=com"}
-                    },
-                    {permission, write, {constant, true}},
-                    {permission, read,
-                     {match, {string, "${name}"},
-                             {string, "^xch-${username}-.*"}}
-                    }
-                   ]}},
-            {resource, queue,
-             {for, [{permission, configure,
-                     {match, {attribute, "${user_dn}", "description"},
-                             {string, "can-declare-queues"}}
-                    },
-                    {permission, write, {constant, true}},
-                    {permission, read,
-                     {'or',
-                      [{'and',
-                        [{equals, "${name}", "test1"},
-                         {equals, "${username}", "Simon MacMullen"}]},
-                       {'and',
-                        [{equals, "${name}", "test2"},
-                         {'not', {equals, "${username}", "Mike Bridgen"}}]}
-                      ]}}
-                   ]}}
-            ]}},
-    {tag_queries, [{administrator, {constant, false}},
-                   {management,    {constant, false}}]}
-  ]}
-].
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/README b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/README
deleted file mode 100644 (file)
index d2969ac..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-This is a very simple example, designed to be set up with the modern
-Debian / Ubuntu packaging of OpenLDAP. Running setup.sh after "apt-get
-install slapd" will wipe out any existing LDAP database and get you:
-
-* A domain
-* An admin user
-* A couple of normal users
-* A group containing the users
-* An OU representing a vhost
-
-These correspond to the examples mentioned in the documentation.
index 431a1f1c3e44d91d7bdc32142695381fce9650b5..373d9d9951b443f21fdd08fe63259280a2ab92c5 100644 (file)
@@ -9,19 +9,19 @@ dn: olcDatabase=bdb,cn=config
 objectClass: olcDatabaseConfig
 objectClass: olcBdbConfig
 olcDatabase: bdb
-# Domain name (e.g. example.com)
-olcSuffix: dc=example,dc=com
+# Domain name (e.g. rabbitmq.com)
+olcSuffix: dc=rabbitmq,dc=com
 # Location on system where database is stored
 olcDbDirectory: /var/lib/ldap
 # Manager of the database
-olcRootDN: cn=admin,dc=example,dc=com
+olcRootDN: cn=admin,dc=rabbitmq,dc=com
 olcRootPW: admin
 olcAccess: to attrs=userPassword
   by self write
   by anonymous auth
-  by dn.base="cn=admin,dc=example,dc=com" write
+  by dn.base="cn=admin,dc=rabbitmq,dc=com" write
   by * none
 olcAccess: to *
   by self write
-  by dn.base="cn=admin,dc=example,dc=com" write
+  by dn.base="cn=admin,dc=rabbitmq,dc=com" write
   by * read
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/groups.ldif b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/groups.ldif
deleted file mode 100644 (file)
index 82402be..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-dn: ou=groups,dc=example,dc=com
-objectclass:organizationalunit
-ou: groups
-
-dn: cn=wheel,ou=groups,dc=example,dc=com
-objectclass: groupOfNames
-cn: wheel
-member: cn=Simon MacMullen,ou=people,dc=example,dc=com
-member: cn=Jean-Sebastien Pedron,ou=people,dc=example,dc=com
-member: cn=Michael Klishin,ou=people,dc=example,dc=com
-
-dn: cn=people,ou=groups,dc=example,dc=com
-objectclass: groupOfNames
-cn: people
-member: cn=Jean-Sebastien Pedron,ou=people,dc=example,dc=com
-member: cn=Michael Klishin,ou=people,dc=example,dc=com
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/memberof_init.ldif
new file mode 100644 (file)
index 0000000..6301e93
--- /dev/null
@@ -0,0 +1,17 @@
+dn: cn=module,cn=config
+cn: module
+objectClass: olcModuleList
+olcModuleLoad: memberof
+olcModulePath: /usr/lib/ldap
+
+dn: olcOverlay={0}memberof,olcDatabase={1}bdb,cn=config
+objectClass: olcConfig
+objectClass: olcMemberOf
+objectClass: olcOverlayConfig
+objectClass: top
+olcOverlay: memberof
+olcMemberOfDangling: ignore
+olcMemberOfRefInt: TRUE
+olcMemberOfGroupOC: groupOfNames
+olcMemberOfMemberAD: member
+olcMemberOfMemberOfAD: memberOf
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/people.ldif b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/people.ldif
deleted file mode 100644 (file)
index 5ec0b8c..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-dn: dc=example,dc=com
-objectClass: top
-objectClass: dcObject
-objectclass: organization
-o: example.com
-dc: example
-description: Example
-
-dn: ou=people,dc=example,dc=com
-objectClass: organizationalUnit
-ou: people
-
-dn: cn=Simon MacMullen,ou=people,dc=example,dc=com
-objectClass: person
-cn: Simon MacMullen
-sn: MacMullen
-userPassword: password
-description: can-declare-queues
-
-dn: cn=Mike Bridgen,ou=people,dc=example,dc=com
-objectClass: person
-cn: Mike Bridgen
-sn: Bridgen
-userPassword: password
-
-dn: cn=Michael Klishin,ou=people,dc=example,dc=com
-objectClass: person
-cn: Michael Klishin
-sn: Klishin
-userPassword: password
-
-dn: cn=Jean-Sebastien Pedron,ou=people,dc=example,dc=com
-objectClass: person
-cn: Jean-Sebastien Pedron
-sn: Pedron
-userPassword: password
-
-dn: cn=John Doe,ou=people,dc=example,dc=com
-objectClass: person
-cn: John Doe
-sn: Doe
-userPassword: password
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/rabbit.ldif b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/rabbit.ldif
deleted file mode 100644 (file)
index e43eac9..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-dn: ou=vhosts,dc=example,dc=com
-objectClass: organizationalUnit
-ou: vhosts
-
-dn: ou=test,ou=vhosts,dc=example,dc=com
-objectClass: top
-objectClass: organizationalUnit
-ou: test
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/refint_1.ldif b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/refint_1.ldif
new file mode 100644 (file)
index 0000000..420f454
--- /dev/null
@@ -0,0 +1,3 @@
+dn: cn=module{1},cn=config
+add: olcmoduleload
+olcmoduleload: refint
\ No newline at end of file
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/example/refint_2.ldif
new file mode 100644 (file)
index 0000000..0955a1a
--- /dev/null
@@ -0,0 +1,7 @@
+dn: olcOverlay={1}refint,olcDatabase={1}bdb,cn=config
+objectClass: olcConfig
+objectClass: olcOverlayConfig
+objectClass: olcRefintConfig
+objectClass: top
+olcOverlay: {1}refint
+olcRefintAttribute: memberof member manager owner
index cf7611d5d34f6fb3a1d450ac5a7bfca19b070ba2..9082bedb161e7ec0c4f8943b6cd80e9eb2b1f875 100755 (executable)
@@ -3,6 +3,6 @@
 DIR=$(dirname $0)
 
 sudo ldapadd -Y EXTERNAL -H ldapi:/// -f ${DIR}/global.ldif
-ldapadd -x -D cn=admin,dc=example,dc=com -w admin -f ${DIR}/people.ldif
-ldapadd -x -D cn=admin,dc=example,dc=com -w admin -f ${DIR}/groups.ldif
-ldapadd -x -D cn=admin,dc=example,dc=com -w admin -f ${DIR}/rabbit.ldif
+sudo ldapadd -Q -Y EXTERNAL -H ldapi:/// -f ${DIR}/memberof_init.ldif
+sudo ldapmodify -Q -Y EXTERNAL -H ldapi:/// -f ${DIR}/refint_1.ldif
+sudo ldapadd -Q -Y EXTERNAL -H ldapi:/// -f ${DIR}/refint_2.ldif
index 5c227f29e3bd62fa90e0deae8e39f45169c5af53..8194efec84ebf58918cd77465fd245f7afc603bd 100755 (executable)
@@ -1,10 +1,17 @@
 #!/bin/sh -e
-
+export DEBIAN_FRONTEND=noninteractive
 sudo apt-get --yes purge slapd
 sudo rm -rf /var/lib/ldap
+echo -e " \
+slapd    slapd/internal/generated_adminpw    password   openstack
+slapd    slapd/password2    password    openstack
+slapd    slapd/internal/adminpw    password openstack
+slapd    slapd/password1    password    openstack
+slapd    slapd/backend    select    BDB
+" | sudo debconf-set-selections
 sudo apt-get --yes install slapd ldap-utils
 sleep 1
 
 DIR=$(dirname $0)
 
-./$DIR/seed.sh
+$DIR/seed.sh
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 3487173e1f26b8620b46a1fd74fc9fdb0022a507..7915caf90284b3d1afeb2ea2d013a27886b3c06c 100644 (file)
@@ -30,6 +30,7 @@
 -define(L(F, A),  log("LDAP "         ++ F, A)).
 -define(L1(F, A), log("    LDAP "     ++ F, A)).
 -define(L2(F, A), log("        LDAP " ++ F, A)).
+-define(SCRUBBED_CREDENTIAL,  "xxxx").
 
 -import(rabbit_misc, [pget/2]).
 
@@ -46,24 +47,27 @@ user_login_authentication(Username, []) ->
        [Username, log_result(R)]),
     R;
 
-user_login_authentication(Username, [{password, <<>>}]) ->
-    %% Password "" is special in LDAP, see
-    %% https://tools.ietf.org/html/rfc4513#section-5.1.2
-    ?L("CHECK: unauthenticated login for ~s", [Username]),
-    ?L("DECISION: unauthenticated login for ~s: denied", [Username]),
-    {refused, "user '~s' - unauthenticated bind not allowed", [Username]};
-
-user_login_authentication(User, [{password, PW}]) ->
-    ?L("CHECK: login for ~s", [User]),
-    R = case dn_lookup_when() of
-            prebind -> UserDN = username_to_dn_prebind(User),
-                       with_ldap({ok, {UserDN, PW}},
-                                 fun(L) -> do_login(User, UserDN,  PW, L) end);
-            _       -> with_ldap({ok, {fill_user_dn_pattern(User), PW}},
-                                 fun(L) -> do_login(User, unknown, PW, L) end)
-        end,
-    ?L("DECISION: login for ~s: ~p", [User, log_result(R)]),
-    R;
+user_login_authentication(Username, AuthProps) when is_list(AuthProps) ->
+    case pget(password, AuthProps) of
+        undefined -> user_login_authentication(Username, []);
+        <<>> ->
+            %% Password "" is special in LDAP, see
+            %% https://tools.ietf.org/html/rfc4513#section-5.1.2
+            ?L("CHECK: unauthenticated login for ~s", [Username]),
+            ?L("DECISION: unauthenticated login for ~s: denied", [Username]),
+            {refused, "user '~s' - unauthenticated bind not allowed", [Username]};
+        PW ->
+            ?L("CHECK: login for ~s", [Username]),
+            R = case dn_lookup_when() of
+                    prebind -> UserDN = username_to_dn_prebind(Username),
+                               with_ldap({ok, {UserDN, PW}},
+                                         login_fun(Username, UserDN, PW, AuthProps));
+                    _       -> with_ldap({ok, {fill_user_dn_pattern(Username), PW}},
+                                         login_fun(Username, unknown, PW, AuthProps))
+                end,
+            ?L("DECISION: login for ~s: ~p", [Username, log_result(R)]),
+            R
+    end;
 
 user_login_authentication(Username, AuthProps) ->
     exit({unknown_auth_props, Username, AuthProps}).
@@ -142,44 +146,66 @@ evaluate0({in_group, DNPattern, Desc}, Args,
     ?L1("evaluated in_group for \"~s\": ~p", [DN, R]),
     R;
 
+evaluate0({in_group_nested, DNPattern}, Args, User, LDAP) ->
+       evaluate({in_group_nested, DNPattern, "member", subtree},
+             Args, User, LDAP);
+evaluate0({in_group_nested, DNPattern, Desc}, Args, User, LDAP) ->
+    evaluate({in_group_nested, DNPattern, Desc, subtree},
+             Args, User, LDAP);
+evaluate0({in_group_nested, DNPattern, Desc, Scope}, Args,
+          #auth_user{impl = #impl{user_dn = UserDN}}, LDAP) ->
+    GroupsBase = case env(group_lookup_base) of
+        none -> env(dn_lookup_base);
+        B    -> B
+    end,
+    GroupDN = fill(DNPattern, Args),
+    EldapScope =
+        case Scope of
+            subtree      -> eldap:wholeSubtree();
+            singlelevel  -> eldap:singleLevel();
+            single_level -> eldap:singleLevel();
+            onelevel     -> eldap:singleLevel();
+            one_level    -> eldap:singleLevel()
+        end,
+    search_nested_group(LDAP, Desc, GroupsBase, EldapScope, UserDN, GroupDN, []);
+
 evaluate0({'not', SubQuery}, Args, User, LDAP) ->
     R = evaluate(SubQuery, Args, User, LDAP),
     ?L1("negated result to ~s", [R]),
     not R;
 
 evaluate0({'and', Queries}, Args, User, LDAP) when is_list(Queries) ->
-    R = lists:foldl(fun (Q,  true)  -> evaluate(Q, Args, User, LDAP);
-                        (_Q, false) -> false
+    R = lists:foldl(fun (Q,  true)    -> evaluate(Q, Args, User, LDAP);
+                        % Treat any non-true result as false
+                        (_Q, _Result) -> false
                     end, true, Queries),
     ?L1("'and' result: ~s", [R]),
     R;
 
 evaluate0({'or', Queries}, Args, User, LDAP) when is_list(Queries) ->
-    R = lists:foldl(fun (_Q, true)  -> true;
-                        (Q,  false) -> evaluate(Q, Args, User, LDAP)
+    R = lists:foldl(fun (_Q, true)    -> true;
+                        % Treat any non-true result as false
+                        (Q,  _Result) -> evaluate(Q, Args, User, LDAP)
                     end, false, Queries),
     ?L1("'or' result: ~s", [R]),
     R;
 
 evaluate0({equals, StringQuery1, StringQuery2}, Args, User, LDAP) ->
     safe_eval(fun (String1, String2) ->
-                      R = String1 =:= String2,
+                      R  = if String1 =:= String2 -> true;
+                              true -> is_multi_attr_member(String1, String2)
+                           end,
                       ?L1("evaluated equals \"~s\", \"~s\": ~s",
-                          [String1, String2, R]),
+                          [format_multi_attr(String1),
+                           format_multi_attr(String2), R]),
                       R
               end,
               evaluate(StringQuery1, Args, User, LDAP),
               evaluate(StringQuery2, Args, User, LDAP));
 
 evaluate0({match, StringQuery, REQuery}, Args, User, LDAP) ->
-    safe_eval(fun (String, RE) ->
-                      R = case re:run(String, RE) of
-                              {match, _} -> true;
-                              nomatch    -> false
-                          end,
-                      ?L1("evaluated match \"~s\" against RE \"~s\": ~s",
-                          [String, RE, R]),
-                      R
+    safe_eval(fun (String1, String2) ->
+                      do_match(String1, String2)
               end,
               evaluate(StringQuery, Args, User, LDAP),
               evaluate(REQuery, Args, User, LDAP));
@@ -196,16 +222,79 @@ evaluate0({attribute, DNPattern, AttributeName}, Args, _User, LDAP) ->
     DN = fill(DNPattern, Args),
     R = attribute(DN, AttributeName, LDAP),
     ?L1("evaluated attribute \"~s\" for \"~s\": ~p",
-        [AttributeName, DN, R]),
+        [AttributeName, DN, format_multi_attr(R)]),
     R;
 
 evaluate0(Q, Args, _User, _LDAP) ->
     {error, {unrecognised_query, Q, Args}}.
 
+search_groups(LDAP, Desc, GroupsBase, Scope, DN) ->
+    Filter = eldap:equalityMatch(Desc, DN),
+    case eldap:search(LDAP,
+                      [{base, GroupsBase},
+                       {filter, Filter},
+                       {attributes, ["dn"]},
+                       {scope, Scope}]) of
+        {error, _} = E ->
+            ?L("error searching for parent groups for \"~s\": ~p", [DN, E]),
+            [];
+        {ok, #eldap_search_result{entries = []}} ->
+            [];
+        {ok, #eldap_search_result{entries = Entries}} ->
+            [ON || #eldap_entry{object_name = ON} <- Entries]
+    end.
+
+search_nested_group(LDAP, Desc, GroupsBase, Scope, CurrentDN, TargetDN, Path) ->
+    case lists:member(CurrentDN, Path) of
+        true  ->
+            ?L("recursive cycle on DN ~s while searching for group ~s",
+               [CurrentDN, TargetDN]),
+            false;
+        false ->
+            GroupDNs = search_groups(LDAP, Desc, GroupsBase, Scope, CurrentDN),
+            case lists:member(TargetDN, GroupDNs) of
+                true  ->
+                    true;
+                false ->
+                    NextPath = [CurrentDN | Path],
+                    lists:any(fun(DN) ->
+                        search_nested_group(LDAP, Desc, GroupsBase, Scope,
+                                            DN, TargetDN, NextPath)
+                    end,
+                    GroupDNs)
+            end
+    end.
+
 safe_eval(_F, {error, _}, _)          -> false;
 safe_eval(_F, _,          {error, _}) -> false;
 safe_eval(F,  V1,         V2)         -> F(V1, V2).
 
+do_match(S1, S2) ->
+    case re:run(S1, S2) of
+        {match, _} -> log_match(S1, S2, R = true),
+                      R;
+        nomatch    ->
+            %% Match bidirectionally if the initial RE consists of
+            %% multiple attributes; otherwise log the match and return result.
+            case S2 of
+                S when length(S) > 1 ->
+                    R = case re:run(S2, S1) of
+                            {match, _} -> true;
+                            nomatch    -> false
+                        end,
+                    log_match(S2, S1, R),
+                    R;
+                _ ->
+                    log_match(S1, S2, R = false),
+                    R
+            end
+    end.
+
+log_match(String, RE, Result) ->
+    ?L1("evaluated match \"~s\" against RE \"~s\": ~s",
+        [format_multi_attr(String),
+         format_multi_attr(RE), Result]).
+
 object_exists(DN, Filter, LDAP) ->
     case eldap:search(LDAP,
                       [{base, DN},
@@ -223,11 +312,8 @@ attribute(DN, AttributeName, LDAP) ->
                       [{base, DN},
                        {filter, eldap:present("objectClass")},
                        {attributes, [AttributeName]}]) of
-        {ok, #eldap_search_result{entries = [#eldap_entry{attributes = A}]}} ->
-            case pget(AttributeName, A) of
-                [Attr] -> Attr;
-                _      -> {error, not_found}
-            end;
+        {ok, #eldap_search_result{entries = E = [#eldap_entry{}|_]}} ->
+            get_attributes(AttributeName, E);
         {ok, #eldap_search_result{entries = _}} ->
             {error, not_found};
         {error, _} = E ->
@@ -258,7 +344,15 @@ with_ldap({ok, Creds}, Fun, Servers) ->
                     rabbit_log:info(
                       "    LDAP connecting to servers: ~p~n", [Servers]),
                     [{log, fun(1, S, A) -> rabbit_log:warning(Pre ++ S, A);
-                              (2, S, A) -> rabbit_log:info   (Pre ++ S, A)
+                              (2, S, A) ->
+                                   rabbit_log:info(Pre ++ S, scrub_creds(A, []))
+                           end} | Opts0];
+                network_unsafe ->
+                    Pre = "    LDAP network traffic: ",
+                    rabbit_log:info(
+                      "    LDAP connecting to servers: ~p~n", [Servers]),
+                    [{log, fun(1, S, A) -> rabbit_log:warning(Pre ++ S, A);
+                              (2, S, A) -> rabbit_log:info(   Pre ++ S, A)
                            end} | Opts0];
                 _ ->
                     Opts0
@@ -274,8 +368,9 @@ with_ldap({ok, Creds}, Fun, Servers) ->
               case with_login(Creds, Servers, Opts, Fun) of
                   {error, {gen_tcp_error, closed}} ->
                       %% retry with new connection
-                      ?L1("server closed connection", []),
+                      rabbit_log:warning("TCP connection to a LDAP server is already closed.~n"),
                       purge_conn(Creds == anon, Servers, Opts),
+                      rabbit_log:warning("LDAP will retry with a new connection.~n"),
                       with_login(Creds, Servers, Opts, Fun);
                   Result -> Result
               end
@@ -291,14 +386,16 @@ with_login(Creds, Servers, Opts, Fun) ->
                 {UserDN, Password} ->
                     case eldap:simple_bind(LDAP, UserDN, Password) of
                         ok ->
-                            ?L1("bind succeeded: ~s", [UserDN]),
+                            ?L1("bind succeeded: ~s",
+                                [scrub_dn(UserDN, env(log))]),
                             Fun(LDAP);
                         {error, invalidCredentials} ->
                             ?L1("bind returned \"invalid credentials\": ~s",
-                                [UserDN]),
+                                [scrub_dn(UserDN, env(log))]),
                             {refused, UserDN, []};
                         {error, E} ->
-                            ?L1("bind error: ~s ~p", [UserDN, E]),
+                            ?L1("bind error: ~s ~p",
+                                [scrub_dn(UserDN, env(log)), E]),
                             {error, E}
                     end
             end;
@@ -316,19 +413,47 @@ get_or_create_conn(IsAnon, Servers, Opts) ->
     Key = {IsAnon, Servers, Opts},
     case dict:find(Key, Conns) of
         {ok, Conn} -> Conn;
-        error      -> 
+        error      ->
             case eldap_open(Servers, Opts) of
                 {ok, _} = Conn -> put(ldap_conns, dict:store(Key, Conn, Conns)), Conn;
                 Error -> Error
             end
     end.
 
+%% Get attribute(s) from eldap entry
+get_attributes(_AttrName, []) -> {error, not_found};
+get_attributes(AttrName, [#eldap_entry{attributes = A}|Rem]) ->
+    case pget(AttrName, A) of
+        [Attr|[]]                    -> Attr;
+        Attrs when length(Attrs) > 1 -> Attrs;
+        _                            -> get_attributes(AttrName, Rem)
+    end;
+get_attributes(AttrName, [_|Rem])    -> get_attributes(AttrName, Rem).
+
+%% Format multiple attribute values for logging
+format_multi_attr(Attrs) ->
+    format_multi_attr(io_lib:printable_list(Attrs), Attrs).
+
+format_multi_attr(true, Attrs)                     -> Attrs;
+format_multi_attr(_,    Attrs) when is_list(Attrs) -> string:join(Attrs, "; ");
+format_multi_attr(_,    Error)                     -> Error.
+
+
+%% In case of multiple attributes, check for equality bi-directionally
+is_multi_attr_member(Str1, Str2) ->
+    lists:member(Str1, Str2) orelse lists:member(Str2, Str1).
+
 purge_conn(IsAnon, Servers, Opts) ->
     Conns = get(ldap_conns),
     Key = {IsAnon, Servers, Opts},
     {_, {_, Conn}} = dict:find(Key, Conns),
-    ?L1("Purging dead server connection", []),
-    eldap:close(Conn), %% May already be closed
+    rabbit_log:warning("LDAP Purging an already closed LDAP server connection~n"),
+    % We cannot close the connection with eldap:close/1 because as of OTP-13327
+    % eldap will try to do_unbind first and will fail with a `{gen_tcp_error, closed}`.
+    % Since we know that the connection is already closed, we just
+    % kill its process.
+    unlink(Conn),
+    exit(Conn, closed),
     put(ldap_conns, dict:erase(Key, Conns)).
 
 eldap_open(Servers, Opts) ->
@@ -372,7 +497,17 @@ env(F) ->
     {ok, V} = application:get_env(rabbitmq_auth_backend_ldap, F),
     V.
 
+login_fun(User, UserDN, Password, AuthProps) ->
+    fun(L) -> case pget(vhost, AuthProps) of
+                  undefined -> do_login(User, UserDN, Password, L);
+                  VHost     -> do_login(User, UserDN, Password, VHost, L)
+              end
+    end.
+
 do_login(Username, PrebindUserDN, Password, LDAP) ->
+    do_login(Username, PrebindUserDN, Password, <<>>, LDAP).
+
+do_login(Username, PrebindUserDN, Password, VHost, LDAP) ->
     UserDN = case PrebindUserDN of
                  unknown -> username_to_dn(Username, LDAP, dn_lookup_when());
                  _       -> PrebindUserDN
@@ -380,30 +515,31 @@ do_login(Username, PrebindUserDN, Password, LDAP) ->
     User = #auth_user{username     = Username,
                       impl         = #impl{user_dn  = UserDN,
                                            password = Password}},
-    DTQ = fun (LDAPn) -> do_tag_queries(Username, UserDN, User, LDAPn) end,
+    DTQ = fun (LDAPn) -> do_tag_queries(Username, UserDN, User, VHost, LDAPn) end,
     TagRes = case env(other_bind) of
                  as_user -> DTQ(LDAP);
                  _       -> with_ldap(creds(User), DTQ)
              end,
     case TagRes of
-        {ok, L} -> case [E || {_, E = {error, _}} <- L] of
-                       []      -> Tags = [Tag || {Tag, true} <- L],
-                                  {ok, User#auth_user{tags = Tags}};
-                       [E | _] -> E
-                   end;
+        {ok, L} -> {ok, User#auth_user{tags = [Tag || {Tag, true} <- L]}};
         E       -> E
     end.
 
-do_tag_queries(Username, UserDN, User, LDAP) ->
+do_tag_queries(Username, UserDN, User, VHost, LDAP) ->
     {ok, [begin
               ?L1("CHECK: does ~s have tag ~s?", [Username, Tag]),
               R = evaluate(Q, [{username, Username},
-                               {user_dn,  UserDN}], User, LDAP),
+                               {user_dn,  UserDN} | vhost_if_defined(VHost)],
+                           User, LDAP),
               ?L1("DECISION: does ~s have tag ~s? ~p",
                   [Username, Tag, R]),
               {Tag, R}
           end || {Tag, Q} <- env(tag_queries)]}.
 
+vhost_if_defined([])    -> [];
+vhost_if_defined(<<>>)  -> [];
+vhost_if_defined(VHost) -> [{vhost, VHost}].
+
 dn_lookup_when() -> case {env(dn_lookup_attribute), env(dn_lookup_bind)} of
                         {none, _}       -> never;
                         {_,    as_user} -> postbind;
@@ -447,6 +583,58 @@ creds(#auth_user{impl = #impl{user_dn = UserDN, password = PW}}, as_user) ->
 creds(_, Creds) ->
     {ok, Creds}.
 
+%% Scrub credentials
+scrub_creds([], Acc)      -> lists:reverse(Acc);
+scrub_creds([H|Rem], Acc) ->
+    scrub_creds(Rem, [scrub_payload_creds(H)|Acc]).
+
+%% Scrub credentials from specific payloads
+scrub_payload_creds({'BindRequest', N, DN, {simple, _PWD}}) ->
+  {'BindRequest', N, scrub_dn(DN), {simple, ?SCRUBBED_CREDENTIAL}};
+scrub_payload_creds(Any) -> Any.
+
+scrub_dn(DN) -> scrub_dn(DN, network).
+
+scrub_dn(DN, network_unsafe) -> DN;
+scrub_dn(DN, false)          -> DN;
+scrub_dn(DN, _) ->
+    case is_dn(DN) of
+        true -> scrub_rdn(string:tokens(DN, ","), []);
+        _    ->
+            %% We aren't fully certain it's a DN, and don't know what sensitive
+            %% info could be contained, so just scrub the entire credential
+            ?SCRUBBED_CREDENTIAL
+    end.
+
+scrub_rdn([], Acc) ->
+    string:join(lists:reverse(Acc), ",");
+scrub_rdn([DN|Rem], Acc) ->
+    DN0 = case catch string:tokens(DN, "=") of
+              L = [RDN, _] -> case string:to_lower(RDN) of
+                                  "cn"  -> [RDN, ?SCRUBBED_CREDENTIAL];
+                                  "dc"  -> [RDN, ?SCRUBBED_CREDENTIAL];
+                                  "ou"  -> [RDN, ?SCRUBBED_CREDENTIAL];
+                                  "uid" -> [RDN, ?SCRUBBED_CREDENTIAL];
+                                  _     -> L
+                              end;
+              _Any ->
+                  %% There's no RDN, log "xxxx=xxxx"
+                  [?SCRUBBED_CREDENTIAL, ?SCRUBBED_CREDENTIAL]
+          end,
+  scrub_rdn(Rem, [string:join(DN0, "=")|Acc]).
+
+is_dn(S) when is_list(S) ->
+    case catch string:tokens(to_list(S), "=") of
+        L when length(L) > 1 -> true;
+        _                    -> false
+    end;
+is_dn(_S) -> false.
+
+to_list(S) when is_list(S)   -> S;
+to_list(S) when is_binary(S) -> binary_to_list(S);
+to_list(S) when is_atom(S)   -> atom_to_list(S);
+to_list(S)                   -> {error, {badarg, S}}.
+
 log(Fmt,  Args) -> case env(log) of
                        false -> ok;
                        _     -> rabbit_log:info(Fmt ++ "~n", Args)
index e9f076b9513bba98313f73dd10df40be426e2a5e..b6ac7ced920456b9f9e24f7aac5cebf7e0432385 100644 (file)
@@ -1,7 +1,7 @@
 %% -*- erlang -*-
 {application, rabbitmq_auth_backend_ldap,
  [{description, "RabbitMQ LDAP Authentication Backend"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_auth_backend_ldap_app, []}},
@@ -9,6 +9,7 @@
           {user_dn_pattern,       "${username}"},
           {dn_lookup_attribute,   none},
           {dn_lookup_base,        none},
+          {group_lookup_base,     none},
           {dn_lookup_bind,        as_user},
           {other_bind,            as_user},
           {vhost_access_query,    {constant, true}},
@@ -17,8 +18,8 @@
           {use_ssl,               false},
           {use_starttls,          false},
           {ssl_options,           []},
-          {port,                  389},
+          {port,                  3890},
           {timeout,               infinity},
           {log,                   false},
-          {pool_size,             10} ] },
+          {pool_size,             64} ] },
   {applications, [kernel, stdlib, eldap, rabbit]}]}.
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/rabbit_ldap_seed.erl
new file mode 100644 (file)
index 0000000..3907e34
--- /dev/null
@@ -0,0 +1,200 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_ldap_seed).
+
+-include_lib("eldap/include/eldap.hrl").
+
+-export([seed/1,delete/1]).
+
+seed(Logon) ->
+    H = connect(Logon),
+    ok = add(H, rabbitmq_com()),
+    ok = add(H, ou("people")),
+    [ add(H, P) || P <- people() ],
+    ok = add(H, ou("vhosts")),
+    ok = add(H, test()),
+    ok = add(H, ou("groups")),
+    [ add(H, P) || P <- groups() ],
+    eldap:close(H),
+    ok.
+
+rabbitmq_com() ->
+    {"dc=rabbitmq,dc=com",
+      [{"objectClass", ["dcObject", "organization"]},
+       {"dc", ["rabbitmq"]},
+       {"o", ["Test"]}]}.
+
+
+delete(Logon) ->
+    H = connect(Logon),
+    eldap:delete(H, "ou=test,dc=rabbitmq,dc=com"),
+    eldap:delete(H, "ou=test,ou=vhosts,dc=rabbitmq,dc=com"),
+    eldap:delete(H, "ou=vhosts,dc=rabbitmq,dc=com"),
+    [ eldap:delete(H, P) || {P, _} <- groups() ],
+    [ eldap:delete(H, P) || {P, _} <- people() ],
+    eldap:delete(H, "ou=groups,dc=rabbitmq,dc=com"),
+    eldap:delete(H, "ou=people,dc=rabbitmq,dc=com"),
+    eldap:delete(H, "dc=rabbitmq,dc=com"),
+    eldap:close(H),
+    ok.
+
+people() ->
+    [ bob(),
+      dominic(),
+      charlie(),
+      edward(),
+      johndoe(),
+      alice(),
+      peter(),
+      carol()
+    ].
+
+groups() ->
+    [wheel_group(),
+     people_group(),
+     staff_group(),
+     bobs_group(),
+     bobs2_group(),
+     admins_group()
+    ].
+
+wheel_group() ->
+    {A, _} = alice(),
+    {C, _} = charlie(),
+    {D, _} = dominic(),
+    {P, _} = peter(),
+    {"cn=wheel,ou=groups,dc=rabbitmq,dc=com",
+     [{"objectClass", ["groupOfNames"]},
+      {"cn", ["wheel"]},
+      {"member", [A, C, D, P]}]}.
+
+people_group() ->
+    {C, _} = charlie(),
+    {D, _} = dominic(),
+    {P, _} = peter(),
+    {"cn=people,ou=groups,dc=rabbitmq,dc=com",
+     [{"objectClass", ["groupOfNames"]},
+      {"cn", ["people"]},
+      {"member", [C, D, P]}]}.
+
+staff_group() ->
+    {C, _} = charlie(),
+    {D, _} = dominic(),
+    {P, _} = peter(),
+    {"cn=staff,ou=groups,dc=rabbitmq,dc=com",
+     [{"objectClass", ["groupOfNames"]},
+      {"cn", ["people"]},
+      {"member", [C, D, P]}]}.
+
+bobs_group() ->
+    {B, _} = bob(),
+    {"cn=bobs,ou=groups,dc=rabbitmq,dc=com",
+     [{"objectClass", ["groupOfNames"]},
+      {"cn", ["bobs"]},
+      {"member", [B]}]}.
+
+bobs2_group() ->
+    {B, _} = bobs_group(),
+    {"cn=bobs2,ou=groups,dc=rabbitmq,dc=com",
+     [{"objectClass", ["groupOfNames"]},
+      {"cn", ["bobs2"]},
+      {"member", [B]}]}.
+
+admins_group() ->
+    {B, _} = bobs2_group(),
+    {W, _} = wheel_group(), 
+    {"cn=admins,ou=groups,dc=rabbitmq,dc=com",
+     [{"objectClass", ["groupOfNames"]},
+      {"cn", ["admins"]},
+      {"member", [B, W]}]}.
+
+person(Cn, Sn) ->
+    {"cn="++Cn++",ou=people,dc=rabbitmq,dc=com",
+     [{"objectClass", ["person"]},
+      {"cn", [Cn]},
+      {"sn", [Sn]},
+      {"userPassword", ["password"]}]}.
+
+bob() -> person("Bob", "Robert").
+dominic() -> person("Dominic", "Dom").
+charlie() -> person("Charlie", "Charlie Boy").
+edward() -> person("Edward", "Ed").
+johndoe() -> person("John Doe", "Doe").
+
+alice() ->
+    {"cn=Alice,ou=people,dc=rabbitmq,dc=com",
+     [{"objectClass", ["person"]},
+      {"cn", ["Alice"]},
+      {"sn", ["Ali"]},
+      {"userPassword", ["password"]},
+      {"description", ["can-declare-queues"]}]}.
+
+peter() ->
+    {"uid=peter,ou=people,dc=rabbitmq,dc=com",
+     [{"cn", ["Peter"]},
+      {"givenName", ["Peter"]},
+      {"sn", ["Jones"]},
+      {"uid", ["peter"]},
+      {"uidNumber", ["5000"]},
+      {"gidNumber", ["10000"]},
+      {"homeDirectory", ["/home/peter"]},
+      {"mail", ["peter.jones@rabbitmq.com"]},
+      {"objectClass", ["top",
+                       "posixAccount",
+                       "shadowAccount",
+                       "inetOrgPerson",
+                       "organizationalPerson",
+                       "person"]},
+      {"loginShell", ["/bin/bash"]},
+      {"userPassword", ["password"]},
+      {"memberOf", ["cn=wheel,ou=groups,dc=rabbitmq,dc=com",
+                    "cn=staff,ou=groups,dc=rabbitmq,dc=com",
+                    "cn=people,ou=groups,dc=rabbitmq,dc=com"]}]}.
+
+carol() ->
+    {"uid=carol,ou=people,dc=rabbitmq,dc=com",
+     [{"cn", ["Carol"]},
+      {"givenName", ["Carol"]},
+      {"sn", ["Meyers"]},
+      {"uid", ["peter"]},
+      {"uidNumber", ["655"]},
+      {"gidNumber", ["10000"]},
+      {"homeDirectory", ["/home/carol"]},
+      {"mail", ["carol.meyers@example.com"]},
+      {"objectClass", ["top",
+                       "posixAccount",
+                       "shadowAccount",
+                       "inetOrgPerson",
+                       "organizationalPerson",
+                       "person"]},
+      {"loginShell", ["/bin/bash"]},
+      {"userPassword", ["password"]}]}.
+
+add(H, {A, B}) ->
+    ok = eldap:add(H, A, B).
+
+connect({Host, Port}) ->
+    {ok, H} = eldap:open([Host], [{port, Port}]),
+    ok = eldap:simple_bind(H, "cn=admin,dc=rabbitmq,dc=com", "admin"),
+    H.
+
+ou(Name) ->
+    {"ou=" ++ Name ++ ",dc=rabbitmq,dc=com", [{"objectClass", ["organizationalUnit"]}, {"ou", [Name]}]}.
+
+test() ->
+    {"ou=test,ou=vhosts,dc=rabbitmq,dc=com", [{"objectClass", ["top", "organizationalUnit"]}, {"ou", ["test"]}]}.
+
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/src/rabbit_auth_backend_ldap_test.erl b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/src/rabbit_auth_backend_ldap_test.erl
deleted file mode 100644 (file)
index cd29160..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_auth_backend_ldap_test).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("amqp_client/include/amqp_client.hrl").
-
--define(SIMON_NAME, "Simon MacMullen").
--define(MIKEB_NAME, "Mike Bridgen").
--define(VHOST, "test").
-
--define(SIMON, #amqp_params_network{username     = << ?SIMON_NAME >>,
-                                    password     = <<"password">>,
-                                    virtual_host = << ?VHOST >>}).
-
--define(MIKEB, #amqp_params_network{username     = << ?MIKEB_NAME >>,
-                                    password     = <<"password">>,
-                                    virtual_host = << ?VHOST >>}).
-
-%%--------------------------------------------------------------------
-
-ldap_only_test_() ->
-    { setup,
-      fun () -> ok = application:set_env(rabbit, auth_backends,
-          [rabbit_auth_backend_ldap]) end,
-      fun (_) -> ok = application:unset_env(rabbit, auth_backends) end,
-      [ {"LDAP Login", login()},
-        {"LDAP In group", in_group()},
-        {"LDAP Constant", const()},
-        {"LDAP String match", string_match()},
-        {"LDAP Boolean check", boolean_logic()},
-        {"LDAP Tags", tag_check([])}
-    ]}.
-
-ldap_and_internal_test_() ->
-    { setup,
-      fun () ->
-          ok = application:set_env(rabbit, auth_backends,
-              [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]),
-          ok = control_action(add_user, [ ?SIMON_NAME, ""]),
-          ok = control_action(set_permissions, [ ?SIMON_NAME, "prefix-.*", "prefix-.*", "prefix-.*"]),
-          ok = control_action(set_user_tags, [ ?SIMON_NAME, "management", "foo"]),
-          ok = control_action(add_user, [ ?MIKEB_NAME, ""]),
-          ok = control_action(set_permissions, [ ?MIKEB_NAME, "", "", ""])
-      end,
-      fun (_) ->
-          ok = application:unset_env(rabbit, auth_backends),
-          ok = control_action(delete_user, [ ?SIMON_NAME ]),
-          ok = control_action(delete_user, [ ?MIKEB_NAME ])
-      end,
-      [ {"LDAP&Internal Login", login()},
-        {"LDAP&Internal Permissions", permission_match()},
-        {"LDAP&Internal Tags", tag_check([management, foo])}
-    ]}.
-
-internal_followed_ldap_and_internal_test_() ->
-    { setup,
-      fun () ->
-          ok = application:set_env(rabbit, auth_backends,
-              [rabbit_auth_backend_internal, {rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]),
-          ok = control_action(add_user, [ ?SIMON_NAME, ""]),
-          ok = control_action(set_permissions, [ ?SIMON_NAME, "prefix-.*", "prefix-.*", "prefix-.*"]),
-          ok = control_action(set_user_tags, [ ?SIMON_NAME, "management", "foo"]),
-          ok = control_action(add_user, [ ?MIKEB_NAME, ""]),
-          ok = control_action(set_permissions, [ ?MIKEB_NAME, "", "", ""])
-      end,
-      fun (_) ->
-          ok = application:unset_env(rabbit, auth_backends),
-          ok = control_action(delete_user, [ ?SIMON_NAME ]),
-          ok = control_action(delete_user, [ ?MIKEB_NAME ])
-      end,
-      [ {"Internal, LDAP&Internal Login", login()},
-        {"Internal, LDAP&Internal Permissions", permission_match()},
-        {"Internal, LDAP&Internal Tags", tag_check([management, foo])}
-    ]}.
-
-
-%%--------------------------------------------------------------------
-
-login() ->
-    [test_login(Env, L, case {LGood, EnvGood} of
-                            {good, good} -> fun succ/1;
-                            _            -> fun fail/1
-                        end) || {LGood, L}     <- logins(),
-                                {EnvGood, Env} <- login_envs()].
-
-logins() ->
-    [{bad, #amqp_params_network{}},
-     {bad, #amqp_params_network{username = <<"Simon MacMullen">>}},
-     {bad, #amqp_params_network{username = <<"Simon MacMullen">>,
-                                password = <<"password">>}},
-     {good, ?SIMON},
-     {good, ?MIKEB}].
-
-login_envs() ->
-    [{good, base_login_env()},
-     {good, dn_lookup_pre_bind_env()},
-     {good, other_bind_admin_env()},
-     {good, other_bind_anon_env()},
-     {bad, other_bind_broken_env()}].
-
-base_login_env() ->
-    [{user_dn_pattern,    "cn=${username},ou=People,dc=example,dc=com"},
-     {dn_lookup_attribute, none},
-     {dn_lookup_base,      none},
-     {dn_lookup_bind,      as_user},
-     {other_bind,          as_user}].
-
-%% TODO configure OpenLDAP to allow a dn_lookup_post_bind_env()
-dn_lookup_pre_bind_env() ->
-    [{user_dn_pattern,    "${username}"},
-     {dn_lookup_attribute, "cn"},
-     {dn_lookup_base,      "OU=People,DC=example,DC=com"},
-     {dn_lookup_bind,      {"cn=admin,dc=example,dc=com", "admin"}}].
-
-other_bind_admin_env() ->
-    [{other_bind, {"cn=admin,dc=example,dc=com", "admin"}}].
-
-other_bind_anon_env() ->
-    [{other_bind, anon}].
-
-other_bind_broken_env() ->
-    [{other_bind, {"cn=admin,dc=example,dc=com", "admi"}}].
-
-test_login(Env, Login, ResultFun) ->
-    ?_test(try
-               set_env(Env),
-               ResultFun(Login)
-           after
-               set_env(base_login_env())
-           end).
-
-set_env(Env) ->
-    [application:set_env(rabbitmq_auth_backend_ldap, K, V) || {K, V} <- Env].
-
-succ(Login) -> ?assertMatch({ok, _}, amqp_connection:start(Login)).
-fail(Login) -> ?assertMatch({error, _}, amqp_connection:start(Login)).
-
-%%--------------------------------------------------------------------
-
-in_group() ->
-    X = [#'exchange.declare'{exchange = <<"test">>}],
-    test_resource_funs([{?SIMON, X, ok},
-                         {?MIKEB, X, fail}]).
-
-const() ->
-    Q = [#'queue.declare'{queue = <<"test">>}],
-    test_resource_funs([{?SIMON, Q, ok},
-                        {?MIKEB, Q, fail}]).
-
-string_match() ->
-    B = fun(N) ->
-                [#'exchange.declare'{exchange = N},
-                 #'queue.declare'{queue = <<"test">>},
-                 #'queue.bind'{exchange = N, queue = <<"test">>}]
-        end,
-    test_resource_funs([{?SIMON, B(<<"xch-Simon MacMullen-abc123">>), ok},
-                        {?SIMON, B(<<"abc123">>),                     fail},
-                        {?SIMON, B(<<"xch-Someone Else-abc123">>),    fail}]).
-
-boolean_logic() ->
-    Q1 = [#'queue.declare'{queue = <<"test1">>},
-          #'basic.consume'{queue = <<"test1">>}],
-    Q2 = [#'queue.declare'{queue = <<"test2">>},
-          #'basic.consume'{queue = <<"test2">>}],
-    [test_resource_fun(PTR) || PTR <- [{?SIMON, Q1, ok},
-                                       {?SIMON, Q2, ok},
-                                       {?MIKEB, Q1, fail},
-                                       {?MIKEB, Q2, fail}]].
-
-permission_match() ->
-    B = fun(N) ->
-                [#'exchange.declare'{exchange = N},
-                 #'queue.declare'{queue = <<"prefix-test">>},
-                 #'queue.bind'{exchange = N, queue = <<"prefix-test">>}]
-        end,
-    test_resource_funs([{?SIMON, B(<<"prefix-abc123">>),              ok},
-                        {?SIMON, B(<<"abc123">>),                     fail},
-                        {?SIMON, B(<<"xch-Simon MacMullen-abc123">>), fail}]).
-
-tag_check(Tags) ->
-    fun() ->
-            {ok, User} = rabbit_access_control:check_user_pass_login(
-                        << ?SIMON_NAME >>, <<"password">>),
-            ?assertEqual(Tags, User#user.tags)
-    end.
-
-
-%%--------------------------------------------------------------------
-
-test_resource_funs(PTRs) -> [test_resource_fun(PTR) || PTR <- PTRs].
-
-test_resource_fun({Person, Things, Result}) ->
-    fun() ->
-            {ok, Conn} = amqp_connection:start(Person),
-            {ok, Ch} = amqp_connection:open_channel(Conn),
-            ?assertEqual(Result,
-                         try
-                             [amqp_channel:call(Ch, T) || T <- Things],
-                             amqp_connection:close(Conn),
-                             ok
-                         catch exit:_ -> fail
-                         end)
-    end.
-
-control_action(Command, Args) ->
-    control_action(Command, node(), Args, default_options()).
-
-control_action(Command, Args, NewOpts) ->
-    control_action(Command, node(), Args,
-                   expand_options(default_options(), NewOpts)).
-
-control_action(Command, Node, Args, Opts) ->
-    case catch rabbit_control_main:action(
-                 Command, Node, Args, Opts,
-                 fun (Format, Args1) ->
-                         io:format(Format ++ " ...~n", Args1)
-                 end) of
-        ok ->
-            io:format("done.~n"),
-            ok;
-        Other ->
-            io:format("failed.~n"),
-            Other
-    end.
-
-default_options() -> [{"-p", ?VHOST}, {"-q", "false"}].
-
-expand_options(As, Bs) ->
-    lists:foldl(fun({K, _}=A, R) ->
-                        case proplists:is_defined(K, R) of
-                            true -> R;
-                            false -> [A | R]
-                        end
-                end, Bs, As).
-
diff --git a/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl b/rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/system_SUITE.erl
new file mode 100644 (file)
index 0000000..40f5e23
--- /dev/null
@@ -0,0 +1,632 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(system_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(ALICE_NAME, "Alice").
+-define(BOB_NAME, "Bob").
+-define(CAROL_NAME, "Carol").
+-define(PETER_NAME, "Peter").
+
+-define(VHOST, "test").
+-define(DEFAULT_LDAP_PORT, "3890").
+
+-define(ALICE, #amqp_params_network{username     = <<?ALICE_NAME>>,
+                                    password     = <<"password">>,
+                                    virtual_host = <<?VHOST>>}).
+
+-define(BOB, #amqp_params_network{username       = <<?BOB_NAME>>,
+                                  password       = <<"password">>,
+                                  virtual_host   = <<?VHOST>>}).
+
+-define(CAROL, #amqp_params_network{username     = <<?CAROL_NAME>>,
+                                    password     = <<"password">>,
+                                    virtual_host = <<?VHOST>>}).
+
+-define(PETER, #amqp_params_network{username     = <<?PETER_NAME>>,
+                                    password     = <<"password">>,
+                                    virtual_host = <<?VHOST>>}).
+
+-define(BASE_CONF_RABBIT, {rabbit, [{default_vhost, <<"test">>}]}).
+
+base_conf_ldap(LdapPort) ->
+                    {rabbitmq_auth_backend_ldap, [{servers, ["localhost"]},
+                                                  {user_dn_pattern,    "cn=${username},ou=People,dc=rabbitmq,dc=com"},
+                                                  {other_bind,         anon},
+                                                  {use_ssl,            false},
+                                                  {port,               LdapPort},
+                                                  {log,                true},
+                                                  {group_lookup_base,  "ou=groups,dc=rabbitmq,dc=com"},
+                                                  {vhost_access_query, {exists, "ou=${vhost},ou=vhosts,dc=rabbitmq,dc=com"}},
+                                                  {resource_access_query,
+                                                   {for, [{resource, exchange,
+                                                           {for, [{permission, configure,
+                                                                   {in_group, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"}
+                                                                  },
+                                                                  {permission, write, {constant, true}},
+                                                                  {permission, read,
+                                                                   {match, {string, "${name}"},
+                                                                           {string, "^xch-${username}-.*"}}
+                                                                  }
+                                                                 ]}},
+                                                          {resource, queue,
+                                                           {for, [{permission, configure,
+                                                                   {match, {attribute, "${user_dn}", "description"},
+                                                                           {string, "can-declare-queues"}}
+                                                                  },
+                                                                  {permission, write, {constant, true}},
+                                                                  {permission, read,
+                                                                   {'or',
+                                                                    [{'and',
+                                                                      [{equals, "${name}", "test1"},
+                                                                       {equals, "${username}", "Alice"}]},
+                                                                     {'and',
+                                                                      [{equals, "${name}", "test2"},
+                                                                       {'not', {equals, "${username}", "Bob"}}]}
+                                                                    ]}}
+                                                                 ]}}
+                                                          ]}},
+                                                  {tag_queries, [{monitor,       {constant, true}},
+                                                                 {administrator, {constant, false}},
+                                                                 {management,    {constant, false}}]}
+                                                ]}.
+
+%%--------------------------------------------------------------------
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                ldap_only,
+                                ldap_and_internal,
+                                internal_followed_ldap_and_internal,
+                                tag_attribution_ldap_only,
+                                tag_attribution_ldap_and_internal,
+                                tag_attribution_internal_followed_by_ldap_and_internal,
+                                invalid_or_clause_ldap_only,
+                                invalid_and_clause_ldap_only
+                               ]}
+    ].
+
+suite() ->
+    [{timetrap, {seconds, 60}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE},
+        {rmq_extra_tcp_ports, [tcp_port_amqp_tls_extra]}
+      ]),
+    {LdapPort, _} = string:to_integer(os:getenv("LDAP_PORT", ?DEFAULT_LDAP_PORT)), % env override; defaults to 3890
+    Config2 = rabbit_ct_helpers:merge_app_env(Config1, ?BASE_CONF_RABBIT),
+    Config3 = rabbit_ct_helpers:merge_app_env(Config2, base_conf_ldap(LdapPort)),
+    Logon = {"localhost", LdapPort},
+    rabbit_ldap_seed:delete(Logon), % clear entries left over from a previous run before seeding
+    rabbit_ldap_seed:seed(Logon),
+    Config4 = rabbit_ct_helpers:set_config(Config3, {ldap_port, LdapPort}), % end_per_suite reads ldap_port back
+
+    rabbit_ct_helpers:run_setup_steps(Config4,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ldap_seed:delete({"localhost", ?config(ldap_port, Config)}),
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_internal(Config) ->
+    ok = control_action(Config, add_user, [?ALICE_NAME, ""]),
+    ok = control_action(Config, set_permissions, [?ALICE_NAME, "prefix-.*", "prefix-.*", "prefix-.*"]),
+    ok = control_action(Config, set_user_tags, [?ALICE_NAME, "management", "foo"]),
+    ok = control_action(Config, add_user, [?BOB_NAME, ""]),
+    ok = control_action(Config, set_permissions, [?BOB_NAME, "", "", ""]),
+    ok = control_action(Config, add_user, [?PETER_NAME, ""]),
+    ok = control_action(Config, set_permissions, [?PETER_NAME, "", "", ""]).
+
+end_internal(Config) ->
+    ok = control_action(Config, delete_user, [?ALICE_NAME]),
+    ok = control_action(Config, delete_user, [?BOB_NAME]),
+    ok = control_action(Config, delete_user, [?PETER_NAME]).
+
+init_per_testcase(Testcase, Config)
+    when Testcase == ldap_and_internal;
+         Testcase == internal_followed_ldap_and_internal ->
+    init_internal(Config),
+    rabbit_ct_helpers:testcase_started(Config, Testcase);
+init_per_testcase(Testcase, Config)
+    when Testcase == tag_attribution_ldap_and_internal;
+         Testcase == tag_attribution_internal_followed_by_ldap_and_internal ->
+    % back up tag queries; the application is rabbitmq_auth_backend_ldap (cf. set_env/2)
+    Cfg = case rabbit_ct_broker_helpers:rpc(Config, 0,
+                                            application,
+                                            get_env,
+                                            [rabbitmq_auth_backend_ldap, tag_queries]) of
+               undefined -> undefined;
+               {ok, X} -> X
+          end,
+    Config1 = rabbit_ct_helpers:set_config(Config, {tag_queries_config, Cfg}),
+    internal_authorization_teardown(Config),
+    internal_authorization_setup(Config),
+    rabbit_ct_helpers:testcase_started(Config1, Testcase);
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config)
+    when Testcase == ldap_and_internal;
+         Testcase == internal_followed_ldap_and_internal ->
+    end_internal(Config),
+    rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(Testcase, Config)
+    when Testcase == tag_attribution_ldap_and_internal;
+         Testcase == tag_attribution_internal_followed_by_ldap_and_internal ->
+    % restore tag queries; the application is rabbitmq_auth_backend_ldap (cf. set_env/2)
+    Cfg = rabbit_ct_helpers:get_config(Config, tag_queries_config),
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+                                      application,
+                                      set_env,
+                                      [rabbitmq_auth_backend_ldap, tag_queries, Cfg]),
+    internal_authorization_teardown(Config),
+    rabbit_ct_helpers:testcase_finished(Config, Testcase);
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+ldap_only(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+    login(Config),
+    in_group(Config),
+    const(Config),
+    string_match(Config),
+    boolean_logic(Config),
+    tag_check(Config, [monitor]),
+    tag_check_subst(Config),
+    logging(Config),
+    ok.
+
+ldap_and_internal(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           application, set_env, [rabbit, auth_backends,
+                                  [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]]),
+    login(Config),
+    permission_match(Config),
+    tag_check(Config, [monitor, management, foo]),
+    ok.
+
+internal_followed_ldap_and_internal(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           application, set_env, [rabbit, auth_backends,
+                                  [rabbit_auth_backend_internal, {rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]]),
+    login(Config),
+    permission_match(Config),
+    tag_check(Config, [monitor, management, foo]),
+    ok.
+
+tag_attribution_ldap_only(Config) ->
+    set_env(Config, tag_query_configuration()),
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+    tag_check(Config, <<"Edward">>, <<"password">>, [monitor, normal]).
+
+tag_attribution_ldap_and_internal(Config) ->
+    set_env(Config, tag_query_configuration()),
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           application, set_env, [rabbit, auth_backends, [{rabbit_auth_backend_ldap,
+                                                           rabbit_auth_backend_internal}]]),
+    tag_check(Config, <<"Edward">>, <<"password">>,
+               [monitor, normal] ++ internal_authorization_tags()).
+
+tag_attribution_internal_followed_by_ldap_and_internal(Config) ->
+    set_env(Config, tag_query_configuration()),
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_internal,
+                                                          {rabbit_auth_backend_ldap,
+                                                           rabbit_auth_backend_internal}]]),
+    tag_check(Config, <<"Edward">>, <<"password">>,
+               [monitor, normal] ++ internal_authorization_tags()).
+
+invalid_or_clause_ldap_only(Config) ->
+    set_env(Config, vhost_access_query_or_in_group()),
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+    B = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+    {ok, C} = amqp_connection:start(B?ALICE),
+    ok = amqp_connection:close(C).
+
+invalid_and_clause_ldap_only(Config) ->
+    set_env(Config, vhost_access_query_and_in_group()),
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           application, set_env, [rabbit, auth_backends, [rabbit_auth_backend_ldap]]),
+    B = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+    % NB: if the query crashes the ldap plugin it returns {error, access_refused}
+    % This may not be a reliable return value assertion
+    {error, not_allowed} = amqp_connection:start(B?ALICE).
+
+%%--------------------------------------------------------------------
+
+login(Config) ->
+    lists:flatten(
+      [test_login(Config, {N, Env}, L, FilterList, case {LGood, EnvGood} of
+                                               {good, good} -> fun succ/1;
+                                               _            -> fun fail/1
+                                           end) ||
+          {LGood, FilterList, L, _Tags}  <- logins(Config),
+          {N, {EnvGood, Env}}            <- login_envs()]).
+
+logins(Config) -> logins_network(Config) ++ logins_direct(Config).
+
+%% Format for login tests, {Outcome, FilterList, Login, Tags}.
+%% Tests skipped for each login_env reference in FilterList.
+logins_network(Config) ->
+    B = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+    [{bad,  [5, 6], B#amqp_params_network{}, []},
+     {bad,  [5, 6], B#amqp_params_network{username     = <<?ALICE_NAME>>}, []},
+     {bad,  [5, 6], B#amqp_params_network{username     = <<?ALICE_NAME>>,
+                                          password     = <<"password">>}, []},
+     {bad,  [5, 6], B#amqp_params_network{username     = <<"Alice">>,
+                                          password     = <<"Alicja">>,
+                                          virtual_host = <<?VHOST>>}, []},
+     {bad,  [1, 2, 3, 4, 6, 7], B?CAROL, []},
+     {good, [5, 6], B?ALICE, []},
+     {good, [5, 6], B?BOB, []},
+     {good, [1, 2, 3, 4, 6, 7, 8], B?PETER, []}].
+
+logins_direct(Config) ->
+    N = #amqp_params_direct{node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)},
+    [{bad,  [5], N#amqp_params_direct{}, []},
+     {bad,  [5], N#amqp_params_direct{username         = <<?ALICE_NAME>>}, []},
+     {bad,  [5], N#amqp_params_direct{username         = <<?ALICE_NAME>>,
+                                      password         = <<"password">>}, [management]},
+     {good, [5], N#amqp_params_direct{username         = <<?ALICE_NAME>>,
+                                      password         = <<"password">>,
+                                      virtual_host     = <<?VHOST>>}, [management]}].
+
+%% Format for login envs, {Reference, {Outcome, Env}}
+login_envs() ->
+    [{1, {good, base_login_env()}},
+     {2, {good, dn_lookup_pre_bind_env()}},
+     {3, {good, other_bind_admin_env()}},
+     {4, {good, other_bind_anon_env()}},
+     {5, {good, posix_vhost_access_multiattr_env()}},
+     {6, {good, tag_queries_subst_env()}},
+     {7, {bad,  other_bind_broken_env()}},
+     {8, {good, vhost_access_query_nested_groups_env()}}].
+
+base_login_env() ->
+    [{user_dn_pattern,     "cn=${username},ou=People,dc=rabbitmq,dc=com"},
+     {dn_lookup_attribute, none},
+     {dn_lookup_base,      none},
+     {dn_lookup_bind,      as_user},
+     {other_bind,          as_user},
+     {tag_queries,         [{monitor,       {constant, true}},
+                            {administrator, {constant, false}},
+                            {management,    {constant, false}}]},
+     {vhost_access_query,  {exists, "ou=${vhost},ou=vhosts,dc=rabbitmq,dc=com"}},
+     {log,                  true}].
+
+%% TODO configure OpenLDAP to allow a dn_lookup_post_bind_env()
+dn_lookup_pre_bind_env() ->
+    [{user_dn_pattern,     "${username}"},
+     {dn_lookup_attribute, "cn"},
+     {dn_lookup_base,      "OU=People,DC=rabbitmq,DC=com"},
+     {dn_lookup_bind,      {"cn=admin,dc=rabbitmq,dc=com", "admin"}}].
+
+other_bind_admin_env() ->
+    [{other_bind, {"cn=admin,dc=rabbitmq,dc=com", "admin"}}].
+
+other_bind_anon_env() ->
+    [{other_bind, anon}].
+
+other_bind_broken_env() ->
+    [{other_bind, {"cn=admin,dc=rabbitmq,dc=com", "admi"}}].
+
+tag_queries_subst_env() ->
+    [{tag_queries, [{administrator, {constant, false}},
+                    {management,
+                     {exists, "ou=${vhost},ou=vhosts,dc=rabbitmq,dc=com"}}]}].
+
+posix_vhost_access_multiattr_env() ->
+    [{user_dn_pattern, "uid=${username},ou=People,dc=rabbitmq,dc=com"},
+     {vhost_access_query,
+      {'and', [{exists, "ou=${vhost},ou=vhosts,dc=rabbitmq,dc=com"},
+               {equals,
+                {attribute, "${user_dn}","memberOf"},
+                {string, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"}},
+               {equals,
+                {attribute, "${user_dn}","memberOf"},
+                {string, "cn=people,ou=groups,dc=rabbitmq,dc=com"}},
+               {equals,
+                {string, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"},
+                {attribute,"${user_dn}","memberOf"}},
+               {equals,
+                {string, "cn=people,ou=groups,dc=rabbitmq,dc=com"},
+                {attribute, "${user_dn}","memberOf"}},
+               {match,
+                {attribute, "${user_dn}","memberOf"},
+                {string, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"}},
+               {match,
+                {attribute, "${user_dn}","memberOf"},
+                {string, "cn=people,ou=groups,dc=rabbitmq,dc=com"}},
+               {match,
+                {string, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"},
+                {attribute, "${user_dn}","memberOf"}},
+               {match,
+                {string, "cn=people,ou=groups,dc=rabbitmq,dc=com"},
+                {attribute, "${user_dn}","memberOf"}}
+              ]}}].
+
+vhost_access_query_or_in_group() ->
+    [{vhost_access_query,
+      {'or', [
+            {in_group, "cn=bananas,ou=groups,dc=rabbitmq,dc=com"},
+            {in_group, "cn=apples,ou=groups,dc=rabbitmq,dc=com"},
+            {in_group, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"}
+             ]}}].
+
+vhost_access_query_and_in_group() ->
+    [{vhost_access_query,
+      {'and', [
+            {in_group, "cn=bananas,ou=groups,dc=rabbitmq,dc=com"},
+            {in_group, "cn=wheel,ou=groups,dc=rabbitmq,dc=com"}
+             ]}}].
+
+vhost_access_query_nested_groups_env() ->
+    [{vhost_access_query, {in_group_nested, "cn=admins,ou=groups,dc=rabbitmq,dc=com"}}].
+%% Apply Env, run ResultFun(Login), always restore base_login_env(); skipped (-> []) when env ref N is filtered.
+test_login(Config, {N, Env}, Login, FilterList, ResultFun) ->
+    case lists:member(N, FilterList) of
+        true -> [];
+        _ ->
+            try
+               set_env(Config, Env),
+               ResultFun(Login)
+            after
+               set_env(Config, base_login_env())
+            end
+    end.
+%% RPC helpers: push each {Key, Value} into the LDAP plugin's app env on broker node 0.
+rpc_set_env(Config, Args) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, Args).
+
+set_env(Config, Env) ->
+    [rpc_set_env(Config, [rabbitmq_auth_backend_ldap, K, V]) || {K, V} <- Env].
+%% succ/fail: assert that connecting with the given params succeeds / is refused.
+succ(Login) ->
+    {ok, Pid} = amqp_connection:start(Login),
+    amqp_connection:close(Pid).
+fail(Login) -> ?assertMatch({error, _}, amqp_connection:start(Login)).
+
+%%--------------------------------------------------------------------
+
+in_group(Config) ->
+    X = [#'exchange.declare'{exchange = <<"test">>}],
+    B = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+    test_resources([{B?ALICE, X, ok},
+                        {B?BOB, X, fail}]).
+
+const(Config) ->
+    Q = [#'queue.declare'{queue = <<"test">>}],
+    B = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+    test_resources([{B?ALICE, Q, ok},
+                        {B?BOB, Q, fail}]).
+
+string_match(Config) ->
+    B = fun(N) ->
+                [#'exchange.declare'{exchange = N},
+                 #'queue.declare'{queue = <<"test">>},
+                 #'queue.bind'{exchange = N, queue = <<"test">>}]
+        end,
+    P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+    test_resources([{P?ALICE, B(<<"xch-Alice-abc123">>), ok},
+                        {P?ALICE, B(<<"abc123">>),                     fail},
+                        {P?ALICE, B(<<"xch-Someone Else-abc123">>),    fail}]).
+
+boolean_logic(Config) ->
+    Q1 = [#'queue.declare'{queue = <<"test1">>},
+          #'basic.consume'{queue = <<"test1">>}],
+    Q2 = [#'queue.declare'{queue = <<"test2">>},
+          #'basic.consume'{queue = <<"test2">>}],
+    P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+    [test_resource(PTR) || PTR <- [{P?ALICE, Q1, ok},
+                                       {P?ALICE, Q2, ok},
+                                       {P?BOB, Q1, fail},
+                                       {P?BOB, Q2, fail}]].
+
+permission_match(Config) ->
+    B = fun(N) ->
+                [#'exchange.declare'{exchange = N},
+                 #'queue.declare'{queue = <<"prefix-test">>},
+                 #'queue.bind'{exchange = N, queue = <<"prefix-test">>}]
+        end,
+    P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+    test_resources([{P?ALICE, B(<<"prefix-abc123">>),    ok},
+                        {P?ALICE, B(<<"abc123">>),           fail},
+                        {P?ALICE, B(<<"xch-Alice-abc123">>), fail}]).
+
+%% Tag check tests, with substitution
+tag_check_subst(Config) ->
+    lists:flatten(
+      [test_tag_check(Config, tag_queries_subst_env(),
+                      fun () -> tag_check(Config, Username, Password, VHost, Outcome, Tags) end) ||
+          {Outcome, _FilterList, #amqp_params_direct{username     = Username,
+                                                     password     = Password,
+                                                     virtual_host = VHost},
+           Tags} <- logins_direct(Config)]).
+
+%% Tag check
+tag_check(Config, Tags) ->
+    tag_check(Config, <<?ALICE_NAME>>, <<"password">>, Tags).
+
+tag_check(Config, Username, Password, Tags) ->
+    tag_check(Config, Username, Password, <<>>, good, Tags).
+
+tag_check(Config, Username, Password, VHost, Outcome, Tags)
+  when is_binary(Username), is_binary(Password), is_binary(VHost), is_list(Tags) ->
+    {ok, User} = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_access_control, check_user_login, [Username, [{password, Password}, {vhost, VHost}]]),
+    tag_check_outcome(Outcome, Tags, User);
+tag_check(_, _, _, _, _, _) -> fun() -> [] end.
+
+tag_check_outcome(good, Tags, User) -> ?assertEqual(Tags, User#user.tags);
+tag_check_outcome(bad, Tags, User)  -> ?assertNotEqual(Tags, User#user.tags).
+
+test_tag_check(Config, Env, TagCheckFun) ->
+    try
+       set_env(Config, Env),
+       TagCheckFun()
+    after
+       set_env(Config, base_login_env())
+    end.
+
+tag_query_configuration() ->
+    [{tag_queries,
+      [{administrator, {constant, false}},
+       %% Query result for tag `management` is FALSE
+       %% because this object does NOT exist.
+       {management,
+        {exists, "cn=${username},ou=Faculty,dc=Computer Science,dc=Engineering"}},
+       {monitor, {constant, true}},
+       %% Query result for tag `normal` is TRUE because
+       %% this object exists.
+       {normal,
+        {exists, "cn=${username},ou=people,dc=rabbitmq,dc=com"}}]}].
+
+internal_authorization_setup(Config) ->
+    ok = control_action(Config, add_user, ["Edward", ""]),
+    ok = control_action(Config, set_user_tags, ["Edward"] ++
+        [ atom_to_list(T) || T <- internal_authorization_tags() ]).
+
+internal_authorization_teardown(Config) ->
+    control_action(Config, delete_user, ["Edward"]).
+
+internal_authorization_tags() ->
+    [foo, bar].
+
+%% Logging tests, triggered within 'test_login/4'
+logging(Config) ->
+    lists:flatten(
+      [test_login(Config, {N, Env}, L, FilterList, case {LGood, EnvGood} of
+                                               {good, good} -> fun succ/1;
+                                               _            -> fun fail/1
+                                           end) ||
+          {LGood, FilterList, L}  <- logging_test_users(Config),
+          {N, {EnvGood, Env}}     <- logging_envs()]).
+
+%% Format for logging tests, {Outcome, FilterList, Login}.
+logging_test_users(Config) ->
+    P = #amqp_params_network{port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)},
+    [{bad,  [], P#amqp_params_network{username = <<?ALICE_NAME>>}},
+     {good, [], P?ALICE}].
+
+logging_envs() ->
+    [{1, {good, scrub_bind_creds_env()}},
+     {2, {good, display_bind_creds_env()}},
+     {3, {bad,  scrub_bind_single_cred_env()}},
+     {4, {bad,  scrub_bind_creds_no_equals_env()}},
+     {5, {bad,  scrub_bind_creds_no_seperator_env()}}].
+
+scrub_bind_creds_env() ->
+    [{log,         network},
+     {other_bind,  {"cn=admin,dc=rabbitmq,dc=com", "admin"}}].
+
+display_bind_creds_env() ->
+    [{log,         network_unsafe},
+     {other_bind,  {"cn=admin,dc=rabbitmq,dc=com", "admin"}}].
+
+scrub_bind_single_cred_env() ->
+    [{log,         network},
+     {other_bind,  {"dc=com", "admin"}}].
+
+scrub_bind_creds_no_equals_env() ->
+    [{log,         network},
+     {other_bind,  {"cn*admin,dc>rabbitmq,dc&com", "admin"}}].
+
+scrub_bind_creds_no_seperator_env() ->
+    [{log,         network},
+     {other_bind,  {"cn=admindc=rabbitmqdc&com", "admin"}}].
+
+%%--------------------------------------------------------------------
+
+test_resources(PTRs) -> [test_resource(PTR) || PTR <- PTRs].
+
+test_resource({Person, Things, Result}) ->
+    {ok, Conn} = amqp_connection:start(Person),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    ?assertEqual(Result,
+                 try
+                     [amqp_channel:call(Ch, T) || T <- Things],
+                     ok
+                 catch exit:_ -> fail
+                 after
+                     amqp_connection:close(Conn)
+                 end).
+
+control_action(Config, Command, Args) ->
+    Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    control_action(Config, Command, Node, Args, default_options()).
+
+control_action(Config, Command, Args, NewOpts) ->
+    Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    control_action(Config, Command, Node, Args,
+                   expand_options(default_options(), NewOpts)).
+
+control_action(_Config, Command, Node, Args, Opts) ->
+    case catch rabbit_control_main:action(
+                 Command, Node, Args, Opts,
+                 fun (Format, Args1) ->
+                         io:format(Format ++ " ...~n", Args1)
+                 end) of
+        ok ->
+            io:format("done.~n"),
+            ok;
+        Other ->
+            io:format("failed.~n"),
+            Other
+    end.
+
+default_options() -> [{"-p", ?VHOST}, {"-q", "false"}].
+
+expand_options(As, Bs) ->
+    lists:foldl(fun({K, _}=A, R) ->
+                        case proplists:is_defined(K, R) of
+                            true  -> R;
+                            false -> [A | R]
+                        end
+                end, Bs, As).
+
similarity index 90%
rename from rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/src/rabbit_auth_backend_ldap_unit_test.erl
rename to rabbitmq-server/deps/rabbitmq_auth_backend_ldap/test/unit_SUITE.erl
index f0ed8d4266a2e2a00bd0b751629d548afde77726..b4a2b1084ec840096c5ef054674d33e9f0f8f1dc 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_auth_backend_ldap_unit_test).
+-module(unit_SUITE).
 
+-include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 
-fill_test() ->
+-compile([export_all]).
+
+all() ->
+    [
+     fill
+    ].
+
+fill(_Config) ->
     F = fun(Fmt, Args, Res) ->
                 ?assertEqual(Res, rabbit_auth_backend_ldap_util:fill(Fmt, Args))
         end,
diff --git a/rabbitmq-server/deps/rabbitmq_auth_mechanism_ssl/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_auth_mechanism_ssl/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index f3ae5014399d26db644fee841394864e9b9753c4..08354b0e5be1d05275dcf18fbe3df454af9f3899 100644 (file)
@@ -1,7 +1,7 @@
 %% -*- erlang -*-
 {application, rabbitmq_auth_mechanism_ssl,
  [{description, "RabbitMQ SSL authentication (SASL EXTERNAL)"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_auth_mechanism_ssl_app, []}},
diff --git a/rabbitmq-server/deps/rabbitmq_codegen/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_codegen/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index 843bcdf252ab1618ce06dc8d6d501b8b1a4dc1aa..e4de0e0a7390d7bcc1afddb4f839c92819a5c303 100644 (file)
@@ -15,6 +15,7 @@
 ##
 
 from __future__ import nested_scopes
+import errno
 import re
 import sys
 import os
@@ -253,8 +254,18 @@ def do_main_dict(funcDict):
         print >> sys.stderr , "  %s <function> <path_to_amqp_spec.json>... <path_to_output_file>" % (sys.argv[0])
         print >> sys.stderr , " where <function> is one of %s" % ", ".join([k for k in funcDict.keys()])
 
+    def mkdir_p(path):
+        try:
+            os.makedirs(path)
+        except OSError as exc:  # Python >2.5
+            if exc.errno == errno.EEXIST and os.path.isdir(path):
+                pass
+            else:
+                raise
+
     def execute(fn, amqp_specs, out_file):
         stdout = sys.stdout
+        mkdir_p(os.path.dirname(out_file))
         f = open(out_file, 'w')
         success = False
         try:
diff --git a/rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index cd8042216951547c452a27aba924261d42c87192..df05a5519ebd479b2468bd43908a6170e59045f3 100644 (file)
@@ -1,6 +1,7 @@
 PROJECT = rabbitmq_consistent_hash_exchange
 
 DEPS = amqp_client
+TEST_DEPS += rabbit
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -11,11 +12,8 @@ ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
 ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
-include erlang.mk
 
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
+# FIXME: Remove rabbitmq_test as TEST_DEPS from here for now.
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
 
-WITH_BROKER_TEST_COMMANDS := \
-       rabbit_exchange_type_consistent_hash_test:test()
+include erlang.mk
index a19f9eea8a75101f27019839db6188eea5390e88..e3e82ce43ebbf90a42b5ef6993f141e44565e93b 100644 (file)
@@ -6,11 +6,11 @@ This plugin adds a consistent-hash exchange type to RabbitMQ.
 
 In various scenarios, you may wish to ensure that messages sent to an
 exchange are consistently and equally distributed across a number of
-different queues based on the routing key of the message, a nominated 
-header  (see "Routing on a header" below), or a message property (see 
-"Routing on a message property" below). You could arrange for this to 
-occur yourself by using a  direct  or topic exchange, binding queues 
-to that exchange and then publishing messages to that exchange that 
+different queues based on the routing key of the message, a nominated
+header  (see "Routing on a header" below), or a message property (see
+"Routing on a message property" below). You could arrange for this to
+occur yourself by using a  direct  or topic exchange, binding queues
+to that exchange and then publishing messages to that exchange that
 match the various binding keys.
 
 However, arranging things this way can be problematic:
@@ -45,10 +45,19 @@ is a number-as-a-string which indicates the number of points in the
 hash space at which you wish the queue to appear. The actual points
 are generated randomly.
 
-So, if you wish for queue A to receive twice as many messages as queue
-B, then you bind the queue A with a binding key of twice the number
-(as a string -- binding keys are always strings) of the binding key of
-the binding to queue B.
+The hashing distributes *routing keys* among queues, not *messages*
+among queues; all messages with the same routing key will go to the
+same queue.  So, if you wish for queue A to receive twice as many
+routing keys routed to it as are routed to queue B, then you bind
+the queue A with a binding key of twice the number (as a string --
+binding keys are always strings) of the binding key of the binding
+to queue B.  Note this is only the case if your routing keys are
+evenly distributed in the hash space.  If, for example, only two
+distinct routing keys are used on all the messages, there's a chance
+both keys will route (consistently!) to the same queue, even though
+other queues have higher values in their binding key.  With a larger
+set of routing keys used, the statistical distribution of routing
+keys approaches the ratios of the binding keys.
 
 Each message gets delivered to at most one queue. Normally, each
 message gets delivered to exactly one queue, but there is a race
@@ -72,7 +81,7 @@ Here is an example using the Erlang client:
 
 ```erlang
 -include_lib("amqp_client/include/amqp_client.hrl").
-    
+
 test() ->
     {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
     {ok, Chan} = amqp_connection:open_channel(Conn),
@@ -100,17 +109,18 @@ test() ->
                    }, Msg) || _ <- lists:seq(1,100000)],
 amqp_connection:close(Conn),
 ok.
-```        
+```
 
 As you can see, the queues `q0` and `q1` get bound each with 10 points
 in the hash space to the exchange `e` which means they'll each get
-roughly the same number of messages. The queues `q2` and `q3` however,
-get 20 points each which means they'll each get roughly the same
-number of messages too, but that will be approximately twice as many
-as `q0` and `q1`. We then publish 100,000 messages to our exchange
-with random routing keys. After this has completed, running
-`rabbitmqctl list_queues` should show that the messages have been
-distributed approximately as desired.
+roughly the same number of routing keys. The queues `q2` and `q3`
+however, get 20 points each which means they'll each get roughly the
+same number of routing keys too, but that will be approximately twice
+as many as `q0` and `q1`. We then publish 100,000 messages to our
+exchange with random routing keys, the queues will get their share of
+messages roughly equal to the binding keys ratios. After this has
+completed, running `rabbitmqctl list_queues` should show that the
+messages have been distributed approximately as desired.
 
 Note the `routing_key`s in the bindings are numbers-as-strings. This
 is because AMQP specifies the routing_key must be a string.
@@ -153,8 +163,8 @@ header, they will all get routed to the same (arbitrarily-chosen) queue.
 ## Routing on a message property
 
 In addition to a value in the header property, you can also route on the
-``message_id``, ``correlation_id``, or ``timestamp`` message property. To do so, 
-declare the exchange with a string argument called "hash-property" naming the 
+``message_id``, ``correlation_id``, or ``timestamp`` message property. To do so,
+declare the exchange with a string argument called "hash-property" naming the
 property to be used. For example using the Erlang client as above:
 
 ```erlang
@@ -167,8 +177,8 @@ property to be used. For example using the Erlang client as above:
 ```
 
 Note that you can not declare an exchange that routes on both "hash-header" and
-"hash-property". If you specify "hash-property" and then publish messages without 
-a value in the named property, they will all get routed to the same 
+"hash-property". If you specify "hash-property" and then publish messages without
+a value in the named property, they will all get routed to the same
 (arbitrarily-chosen) queue.
 
 ## Getting Help
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index aa6a5e194cbb24b9a6f90e0b2728c61142fdc9d4..4113f0157cf399197de089fcc3bf5f870710a7c1 100644 (file)
@@ -179,7 +179,7 @@ init() ->
 find_numbers(_Source, 0, Acc) ->
     Acc;
 find_numbers(Source, N, Acc) ->
-    Number = random:uniform(?PHASH2_RANGE) - 1,
+    Number = rand_compat:uniform(?PHASH2_RANGE) - 1,
     case mnesia:read(?TABLE, {Source, Number}, write) of
         []  -> find_numbers(Source, N-1, [Number | Acc]);
         [_] -> find_numbers(Source, N, Acc)
index 1fbcd78819929537e0d74b7d7afb715acd04db15..688a6067e13c687cf8d921f80c0c43c9f1ed6f44 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_consistent_hash_exchange,
  [{description, "Consistent Hash Exchange Type"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {env, []},
similarity index 62%
rename from rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/test/src/rabbit_exchange_type_consistent_hash_test.erl
rename to rabbitmq-server/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl
index be068b79f930bfaa86c81958ea28ff9833b34a8a..d13b94a9688c3669f2203d8a92458ea49ee1fc9f 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_exchange_type_consistent_hash_test).
--export([test/0]).
+-module(rabbit_exchange_type_consistent_hash_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 -include_lib("eunit/include/eunit.hrl").
 
-%% Because the routing is probabilistic, we can't really test a great
-%% deal here.
-
-test() ->
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                routing_test
+                               ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+routing_test(Config) ->
     %% Run the test twice to test we clean up correctly
-    t([<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>]),
-    t([<<"q4">>, <<"q5">>, <<"q6">>, <<"q7">>]).
-
-t(Qs) ->
-    ok = test_with_rk(Qs),
-    ok = test_with_header(Qs),
-    ok = test_binding_with_negative_routing_key(),
-    ok = test_binding_with_non_numeric_routing_key(),
-    ok = test_with_correlation_id(Qs),
-    ok = test_with_message_id(Qs),
-    ok = test_with_timestamp(Qs),
-    ok = test_non_supported_property(),
-    ok = test_mutually_exclusive_arguments(),
+    routing_test0(Config, [<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>]),
+    routing_test0(Config, [<<"q4">>, <<"q5">>, <<"q6">>, <<"q7">>]),
+
+    passed.
+
+routing_test0(Config, Qs) ->
+    ok = test_with_rk(Config, Qs),
+    ok = test_with_header(Config, Qs),
+    ok = test_binding_with_negative_routing_key(Config),
+    ok = test_binding_with_non_numeric_routing_key(Config),
+    ok = test_with_correlation_id(Config, Qs),
+    ok = test_with_message_id(Config, Qs),
+    ok = test_with_timestamp(Config, Qs),
+    ok = test_non_supported_property(Config),
+    ok = test_mutually_exclusive_arguments(Config),
     ok.
 
-test_with_rk(Qs) ->
-    test0(fun () ->
+%% -------------------------------------------------------------------
+%% Implementation
+%% -------------------------------------------------------------------
+
+test_with_rk(Config, Qs) ->
+    test0(Config, fun () ->
                   #'basic.publish'{exchange = <<"e">>, routing_key = rnd()}
           end,
           fun() ->
                   #amqp_msg{props = #'P_basic'{}, payload = <<>>}
           end, [], Qs).
 
-test_with_header(Qs) ->
-    test0(fun () ->
+test_with_header(Config, Qs) ->
+    test0(Config, fun () ->
                   #'basic.publish'{exchange = <<"e">>}
           end,
           fun() ->
@@ -57,33 +109,33 @@ test_with_header(Qs) ->
           end, [{<<"hash-header">>, longstr, <<"hashme">>}], Qs).
 
 
-test_with_correlation_id(Qs) ->
-    test0(fun() ->
+test_with_correlation_id(Config, Qs) ->
+    test0(Config, fun() ->
                   #'basic.publish'{exchange = <<"e">>}
           end,
           fun() ->
                   #amqp_msg{props = #'P_basic'{correlation_id = rnd()}, payload = <<>>}
           end, [{<<"hash-property">>, longstr, <<"correlation_id">>}], Qs).
 
-test_with_message_id(Qs) ->
-    test0(fun() ->
+test_with_message_id(Config, Qs) ->
+    test0(Config, fun() ->
                   #'basic.publish'{exchange = <<"e">>}
           end,
           fun() ->
                   #amqp_msg{props = #'P_basic'{message_id = rnd()}, payload = <<>>}
           end, [{<<"hash-property">>, longstr, <<"message_id">>}], Qs).
 
-test_with_timestamp(Qs) ->
-    test0(fun() ->
+test_with_timestamp(Config, Qs) ->
+    test0(Config, fun() ->
                   #'basic.publish'{exchange = <<"e">>}
           end,
           fun() ->
                   #amqp_msg{props = #'P_basic'{timestamp = rndint()}, payload = <<>>}
           end, [{<<"hash-property">>, longstr, <<"timestamp">>}], Qs).
 
-test_mutually_exclusive_arguments() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
+test_mutually_exclusive_arguments(Config) ->
+    Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
     process_flag(trap_exit, true),
     Cmd = #'exchange.declare'{
              exchange  = <<"fail">>,
@@ -92,12 +144,13 @@ test_mutually_exclusive_arguments() ->
                           {<<"hash-property">>, longstr, <<"bar">>}]
             },
     ?assertExit(_, amqp_channel:call(Chan, Cmd)),
-    amqp_connection:close(Conn),
+
+    rabbit_ct_client_helpers:close_channel(Chan),
     ok.
 
-test_non_supported_property() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
+test_non_supported_property(Config) ->
+    Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
     process_flag(trap_exit, true),
     Cmd = #'exchange.declare'{
              exchange  = <<"fail">>,
@@ -105,20 +158,20 @@ test_non_supported_property() ->
              arguments = [{<<"hash-property">>, longstr, <<"app_id">>}]
             },
     ?assertExit(_, amqp_channel:call(Chan, Cmd)),
-    amqp_connection:close(Conn),
+
+    rabbit_ct_client_helpers:close_channel(Chan),
     ok.
 
 rnd() ->
     list_to_binary(integer_to_list(rndint())).
 
 rndint() ->
-    random:uniform(1000000).
+    rand_compat:uniform(1000000).
 
-test0(MakeMethod, MakeMsg, DeclareArgs, [Q1, Q2, Q3, Q4] = Queues) ->
+test0(Config, MakeMethod, MakeMsg, DeclareArgs, [Q1, Q2, Q3, Q4] = Queues) ->
     Count = 10000,
+    Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
 
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
     #'exchange.declare_ok'{} =
         amqp_channel:call(Chan,
                           #'exchange.declare' {
@@ -156,13 +209,13 @@ test0(MakeMethod, MakeMsg, DeclareArgs, [Q1, Q2, Q3, Q4] = Queues) ->
     [true = C > 0.01 * Count || C <- Counts], %% We are not *grossly* unfair
     amqp_channel:call(Chan, #'exchange.delete' {exchange = <<"e">>}),
     [amqp_channel:call(Chan, #'queue.delete' {queue = Q}) || Q <- Queues],
-    amqp_channel:close(Chan),
-    amqp_connection:close(Conn),
+
+    rabbit_ct_client_helpers:close_channel(Chan),
     ok.
 
-test_binding_with_negative_routing_key() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
+test_binding_with_negative_routing_key(Config) ->
+    Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
     Declare1 = #'exchange.declare'{exchange = <<"bind-fail">>,
                                    type = <<"x-consistent-hash">>},
     #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare1),
@@ -173,14 +226,16 @@ test_binding_with_negative_routing_key() ->
     Cmd = #'queue.bind'{exchange = <<"bind-fail">>,
                         routing_key = <<"-1">>},
     ?assertExit(_, amqp_channel:call(Chan, Cmd)),
-    {ok, Ch2} = amqp_connection:open_channel(Conn),
+    Ch2 = rabbit_ct_client_helpers:open_channel(Config, 0),
     amqp_channel:call(Ch2, #'queue.delete'{queue = Q}),
-    amqp_connection:close(Conn),
+
+    rabbit_ct_client_helpers:close_channel(Chan),
+    rabbit_ct_client_helpers:close_channel(Ch2),
     ok.
 
-test_binding_with_non_numeric_routing_key() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
+test_binding_with_non_numeric_routing_key(Config) ->
+    Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
+
     Declare1 = #'exchange.declare'{exchange = <<"bind-fail">>,
                                    type = <<"x-consistent-hash">>},
     #'exchange.declare_ok'{} = amqp_channel:call(Chan, Declare1),
@@ -191,7 +246,9 @@ test_binding_with_non_numeric_routing_key() ->
     Cmd = #'queue.bind'{exchange = <<"bind-fail">>,
                         routing_key = <<"not-a-number">>},
     ?assertExit(_, amqp_channel:call(Chan, Cmd)),
-    {ok, Ch2} = amqp_connection:open_channel(Conn),
+
+    Ch2 = rabbit_ct_client_helpers:open_channel(Config, 0),
     amqp_channel:call(Ch2, #'queue.delete'{queue = Q}),
-    amqp_connection:close(Conn),
+
+    rabbit_ct_client_helpers:close_channel(Chan),
     ok.
diff --git a/rabbitmq-server/deps/rabbitmq_event_exchange/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_event_exchange/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/rabbitmq-server/deps/rabbitmq_event_exchange/CONTRIBUTING.md b/rabbitmq-server/deps/rabbitmq_event_exchange/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..45bbcbe
--- /dev/null
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests is the primary place of discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 7ce8e2a2b452320cf190b167076cac35113aaf28..a12edbc972523a86dac6652192c6a544859042e2 100644 (file)
@@ -1,21 +1,20 @@
 PROJECT = rabbitmq_event_exchange
 
-TEST_DEPS = amqp_client
+TEST_DEPS += amqp_client
+TEST_DEPS += rabbit 
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
 # FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
 # reviewed and merged.
-
 ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
 ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
-include erlang.mk
 
 # --------------------------------------------------------------------
 # Testing.
 # --------------------------------------------------------------------
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
 
-WITH_BROKER_TEST_COMMANDS := \
-       rabbit_exchange_type_event_test_all:all_tests()
+include erlang.mk
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 87095ded2a0985577ad385cc08a1b327399781b1..978b33537968b71355620f87755fd4d293a886a7 100644 (file)
@@ -26,8 +26,6 @@
 
 -define(EXCH_NAME, <<"amq.rabbitmq.event">>).
 
--import(rabbit_misc, [pget/2, pget/3]).
-
 -rabbit_boot_step({rabbit_event_exchange,
                    [{description, "event exchange"},
                     {mfa,         {?MODULE, register, []}},
 %%----------------------------------------------------------------------------
 
 register() ->
-    rabbit_exchange:declare(x(), topic, true, false, true, []),
+    rabbit_exchange:declare(exchange(), topic, true, false, true, []),
     gen_event:add_handler(rabbit_event, ?MODULE, []).
 
 unregister() ->
     gen_event:delete_handler(rabbit_event, ?MODULE, []).
 
-x() ->
+exchange() ->
     VHost = ensure_vhost_exists(),
     rabbit_misc:r(VHost, exchange, ?EXCH_NAME).
 
@@ -60,15 +58,17 @@ handle_event(#event{type      = Type,
                     reference = none}, State) ->
     case key(Type) of
         ignore -> ok;
-        Key    -> PBasic = #'P_basic'{delivery_mode = 2,
-                                      headers = fmt_proplist(Props),
+        Key    ->
+                  Props2 = [{<<"timestamp_in_ms">>, TS} | Props],
+                  PBasic = #'P_basic'{delivery_mode = 2,
+                                      headers = fmt_proplist(Props2),
                                       %% 0-9-1 says the timestamp is a
                                       %% "64 bit POSIX
                                       %% timestamp". That's second
                                       %% resolution, not millisecond.
                                       timestamp = time_compat:convert_time_unit(
                                                     TS, milli_seconds, seconds)},
-                  Msg = rabbit_basic:message(x(), Key, PBasic, <<>>),
+                  Msg = rabbit_basic:message(exchange(), Key, PBasic, <<>>),
                   rabbit_basic:publish(
                     rabbit_basic:delivery(false, false, Msg, undefined))
     end,
index f1dc8f533852b0785084c64597bcf6a8ecf5fd2d..971b52da980ef6c504036e199b0b4103c993a229 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_event_exchange,
  [{description, "Event Exchange Type"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {env, []},
diff --git a/rabbitmq-server/deps/rabbitmq_event_exchange/test/src/rabbit_exchange_type_event_test.erl b/rabbitmq-server/deps/rabbitmq_event_exchange/test/src/rabbit_exchange_type_event_test.erl
deleted file mode 100644 (file)
index 80a79ec..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ Consistent Hash Exchange.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_exchange_type_event_test).
--include_lib("eunit/include/eunit.hrl").
-
--include_lib("amqp_client/include/amqp_client.hrl").
-
-%% Only really tests that we're not completely broken.
-simple_test() ->
-    Now = time_compat:os_system_time(seconds),
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    #'queue.declare_ok'{queue = Q} =
-        amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
-    amqp_channel:call(Ch, #'queue.bind'{queue       = Q,
-                                        exchange    = <<"amq.rabbitmq.event">>,
-                                        routing_key = <<"queue.*">>}),
-    amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true},
-                           self()),
-    receive
-        #'basic.consume_ok'{} -> ok
-    end,
-
-    #'queue.declare_ok'{queue = Q2} =
-        amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
-
-    receive
-        {#'basic.deliver'{routing_key = Key},
-         #amqp_msg{props = #'P_basic'{headers = Headers, timestamp = TS}}} ->
-            %% timestamp is within the last 5 seconds
-            ?assert((TS - Now) =< 5),
-            ?assertMatch(<<"queue.created">>, Key),
-            ?assertMatch({longstr, Q2}, rabbit_misc:table_lookup(
-                                          Headers, <<"name">>))
-    end,
-
-    amqp_connection:close(Conn),
-    ok.
diff --git a/rabbitmq-server/deps/rabbitmq_event_exchange/test/system_SUITE.erl b/rabbitmq-server/deps/rabbitmq_event_exchange/test/system_SUITE.erl
new file mode 100644 (file)
index 0000000..740d156
--- /dev/null
@@ -0,0 +1,123 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Consistent Hash Exchange.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(system_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+-compile(export_all).
+
+all() ->
+    [
+     queue_created,
+     authentication
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE}
+      ]),
+    Config2 = rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()),
+    Config2.
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+%% Only really tests that we're not completely broken.
+queue_created(Config) ->
+    Now = time_compat:os_system_time(seconds),
+
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    #'queue.declare_ok'{queue = Q} =
+        amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+    amqp_channel:call(Ch, #'queue.bind'{queue       = Q,
+                                        exchange    = <<"amq.rabbitmq.event">>,
+                                        routing_key = <<"queue.*">>}),
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true},
+                           self()),
+    receive
+        #'basic.consume_ok'{} -> ok
+    end,
+
+    #'queue.declare_ok'{queue = Q2} =
+        amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+
+    receive
+        {#'basic.deliver'{routing_key = Key},
+         #amqp_msg{props = #'P_basic'{headers = Headers, timestamp = TS}}} ->
+            %% timestamp is within the last 5 seconds
+            true = ((TS - Now) =< 5),
+            <<"queue.created">> = Key,
+            {longstr, Q2} = rabbit_misc:table_lookup(Headers, <<"name">>)
+    end,
+
+    rabbit_ct_client_helpers:close_channel(Ch),
+    ok.
+
+
+authentication(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+    #'queue.declare_ok'{queue = Q} =
+        amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+    amqp_channel:call(Ch, #'queue.bind'{queue       = Q,
+                                        exchange    = <<"amq.rabbitmq.event">>,
+                                        routing_key = <<"user.#">>}),
+    Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0),
+
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q, no_ack = true},
+                           self()),
+    receive
+        #'basic.consume_ok'{} -> ok
+    end,
+
+    receive
+        {#'basic.deliver'{routing_key = Key},
+         #amqp_msg{props = #'P_basic'{headers = Headers}}} ->
+            <<"user.authentication.success">> = Key,
+            undefined = rabbit_misc:table_lookup(Headers, <<"vhost">>),
+            {longstr, _PeerHost} = rabbit_misc:table_lookup(Headers, <<"peer_host">>),
+            {bool, false} = rabbit_misc:table_lookup(Headers, <<"ssl">>)
+    end,
+
+    amqp_connection:close(Conn2),
+    ok.
similarity index 89%
rename from rabbitmq-server/deps/rabbitmq_event_exchange/test/src/rabbit_exchange_type_event_unit_test.erl
rename to rabbitmq-server/deps/rabbitmq_event_exchange/test/unit_SUITE.erl
index 89e5b23811b5631bc5fa7d5393263d55ef8bce2e..18d691b0ac46628a8dce6a2595dc031c9ae0cf6f 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_exchange_type_event_unit_test).
+-module(unit_SUITE).
 
--include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
 
-encoding_test() ->
-    T = fun (In, Exp) ->
-                ?assertEqual(rabbit_exchange_type_event:fmt_proplist(In), Exp)
-        end,
+-compile(export_all).
+
+all() -> [ encoding ].
+
+encoding(_) ->
+    T = fun (In, Exp) ->
+                true = (rabbit_exchange_type_event:fmt_proplist(In) == Exp) end,
     T([{name, <<"test">>}],
       [{<<"name">>, longstr, <<"test">>}]),
     T([{name, rabbit_misc:r(<<"/">>, exchange, <<"test">>)}],
diff --git a/rabbitmq-server/deps/rabbitmq_federation/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_federation/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index c1f753078c4b85d63d53638b074c9d90fe55378c..7a28a0ec7d2721e448e56553e935fe1caa375c27 100644 (file)
@@ -1,7 +1,7 @@
 PROJECT = rabbitmq_federation
 
 DEPS = amqp_client
-TEST_DEPS = rabbit
+TEST_DEPS += rabbit
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -12,25 +12,8 @@ ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
 ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
-include erlang.mk
-
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
-
-FILTER := all
-COVER := false
 
-WITH_BROKER_TEST_COMMANDS := \
-       rabbit_test_runner:run_in_broker(\"$(CURDIR)/test\",\"$(FILTER)\")
-WITH_BROKER_SETUP_SCRIPTS := $(CURDIR)/etc/setup-rabbit-test.sh
+# FIXME: Remove rabbitmq_test as TEST_DEPS from here for now.
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
 
-TEST_PLUGINS_ROOTDIR := $(TEST_TMPDIR)/plugins
-
-STANDALONE_TEST_COMMANDS := \
-       rabbit_test_runner:run_multi(\"$(DEPS_DIR)\",\"$(CURDIR)/test\",\"$(FILTER)\",$(COVER),\"$(TEST_PLUGINS_ROOTDIR)\")
-
-pre-standalone-tests:: test-tmpdir test-dist
-       $(verbose) rm -rf $(TEST_PLUGINS_ROOTDIR)
-       $(exec_verbose) mkdir -p $(TEST_PLUGINS_ROOTDIR)
-       $(verbose) cp -a $(DIST_DIR) $(TEST_PLUGINS_ROOTDIR)
+include erlang.mk
index 9f0c0c38494c4beabf27ccddfa996d51d66a91d8..f7ca7bebb76849368b9a6bf56f1bdea9e847604d 100644 (file)
@@ -16,7 +16,7 @@
 
 ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
 
-ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+ERLANG_MK_VERSION = 2.0.0-pre.2-132-g62d576b
 
 # Core configuration.
 
@@ -24,6 +24,7 @@ PROJECT ?= $(notdir $(CURDIR))
 PROJECT := $(strip $(PROJECT))
 
 PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
 
 # Verbosity.
 
@@ -84,7 +85,7 @@ all:: deps app rel
 rel::
        $(verbose) :
 
-check:: clean app tests
+check:: tests
 
 clean:: clean-crashdump
 
@@ -283,7 +284,7 @@ pkg_apns_description = Apple Push Notification Server for Erlang
 pkg_apns_homepage = http://inaka.github.com/apns4erl
 pkg_apns_fetch = git
 pkg_apns_repo = https://github.com/inaka/apns4erl
-pkg_apns_commit = 1.0.4
+pkg_apns_commit = master
 
 PACKAGES += azdht
 pkg_azdht_name = azdht
@@ -387,7 +388,7 @@ pkg_bitcask_description = because you need another a key/value storage engine
 pkg_bitcask_homepage = https://github.com/basho/bitcask
 pkg_bitcask_fetch = git
 pkg_bitcask_repo = https://github.com/basho/bitcask
-pkg_bitcask_commit = master
+pkg_bitcask_commit = develop
 
 PACKAGES += bitstore
 pkg_bitstore_name = bitstore
@@ -421,6 +422,14 @@ pkg_boss_db_fetch = git
 pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
 pkg_boss_db_commit = master
 
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
 PACKAGES += bson
 pkg_bson_name = bson
 pkg_bson_description = BSON documents in Erlang, see bsonspec.org
@@ -451,7 +460,7 @@ pkg_cake_description = Really simple terminal colorization
 pkg_cake_homepage = https://github.com/darach/cake-erl
 pkg_cake_fetch = git
 pkg_cake_repo = https://github.com/darach/cake-erl
-pkg_cake_commit = v0.1.2
+pkg_cake_commit = master
 
 PACKAGES += carotene
 pkg_carotene_name = carotene
@@ -787,7 +796,7 @@ pkg_cowboy_description = Small, fast and modular HTTP server.
 pkg_cowboy_homepage = http://ninenines.eu
 pkg_cowboy_fetch = git
 pkg_cowboy_repo = https://github.com/ninenines/cowboy
-pkg_cowboy_commit = 1.0.1
+pkg_cowboy_commit = 1.0.4
 
 PACKAGES += cowdb
 pkg_cowdb_name = cowdb
@@ -803,7 +812,7 @@ pkg_cowlib_description = Support library for manipulating Web protocols.
 pkg_cowlib_homepage = http://ninenines.eu
 pkg_cowlib_fetch = git
 pkg_cowlib_repo = https://github.com/ninenines/cowlib
-pkg_cowlib_commit = 1.0.1
+pkg_cowlib_commit = 1.0.2
 
 PACKAGES += cpg
 pkg_cpg_name = cpg
@@ -885,14 +894,6 @@ pkg_dh_date_fetch = git
 pkg_dh_date_repo = https://github.com/daleharvey/dh_date
 pkg_dh_date_commit = master
 
-PACKAGES += dhtcrawler
-pkg_dhtcrawler_name = dhtcrawler
-pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
-pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_fetch = git
-pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_commit = master
-
 PACKAGES += dirbusterl
 pkg_dirbusterl_name = dirbusterl
 pkg_dirbusterl_description = DirBuster successor in Erlang
@@ -1053,14 +1054,6 @@ pkg_efene_fetch = git
 pkg_efene_repo = https://github.com/efene/efene
 pkg_efene_commit = master
 
-PACKAGES += eganglia
-pkg_eganglia_name = eganglia
-pkg_eganglia_description = Erlang library to interact with Ganglia
-pkg_eganglia_homepage = https://github.com/inaka/eganglia
-pkg_eganglia_fetch = git
-pkg_eganglia_repo = https://github.com/inaka/eganglia
-pkg_eganglia_commit = v0.9.1
-
 PACKAGES += egeoip
 pkg_egeoip_name = egeoip
 pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
@@ -1075,7 +1068,7 @@ pkg_ehsa_description = Erlang HTTP server basic and digest authentication module
 pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
 pkg_ehsa_fetch = hg
 pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
-pkg_ehsa_commit = 2.0.4
+pkg_ehsa_commit = default
 
 PACKAGES += ej
 pkg_ej_name = ej
@@ -1139,7 +1132,7 @@ pkg_elvis_description = Erlang Style Reviewer
 pkg_elvis_homepage = https://github.com/inaka/elvis
 pkg_elvis_fetch = git
 pkg_elvis_repo = https://github.com/inaka/elvis
-pkg_elvis_commit = 0.2.4
+pkg_elvis_commit = master
 
 PACKAGES += emagick
 pkg_emagick_name = emagick
@@ -1515,7 +1508,7 @@ pkg_erwa_description = A WAMP router and client written in Erlang.
 pkg_erwa_homepage = https://github.com/bwegh/erwa
 pkg_erwa_fetch = git
 pkg_erwa_repo = https://github.com/bwegh/erwa
-pkg_erwa_commit = 0.1.1
+pkg_erwa_commit = master
 
 PACKAGES += espec
 pkg_espec_name = espec
@@ -1619,7 +1612,7 @@ pkg_exometer_description = Basic measurement objects and probe behavior
 pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
 pkg_exometer_fetch = git
 pkg_exometer_repo = https://github.com/Feuerlabs/exometer
-pkg_exometer_commit = 1.2
+pkg_exometer_commit = master
 
 PACKAGES += exs1024
 pkg_exs1024_name = exs1024
@@ -1683,7 +1676,15 @@ pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
 pkg_feeder_homepage = https://github.com/michaelnisi/feeder
 pkg_feeder_fetch = git
 pkg_feeder_repo = https://github.com/michaelnisi/feeder
-pkg_feeder_commit = v1.4.6
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
 
 PACKAGES += fix
 pkg_fix_name = fix
@@ -1781,6 +1782,14 @@ pkg_geef_fetch = git
 pkg_geef_repo = https://github.com/carlosmn/geef
 pkg_geef_commit = master
 
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
 PACKAGES += gen_cycle
 pkg_gen_cycle_name = gen_cycle
 pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
@@ -1837,6 +1846,14 @@ pkg_gen_unix_fetch = git
 pkg_gen_unix_repo = https://github.com/msantos/gen_unix
 pkg_gen_unix_commit = master
 
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
 PACKAGES += getopt
 pkg_getopt_name = getopt
 pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
@@ -1981,13 +1998,21 @@ pkg_hyper_fetch = git
 pkg_hyper_repo = https://github.com/GameAnalytics/hyper
 pkg_hyper_commit = master
 
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
 PACKAGES += ibrowse
 pkg_ibrowse_name = ibrowse
 pkg_ibrowse_description = Erlang HTTP client
 pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
 pkg_ibrowse_fetch = git
 pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
-pkg_ibrowse_commit = v4.1.1
+pkg_ibrowse_commit = master
 
 PACKAGES += ierlang
 pkg_ierlang_name = ierlang
@@ -2043,7 +2068,7 @@ pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
 pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
 pkg_jamdb_sybase_fetch = git
 pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
-pkg_jamdb_sybase_commit = 0.6.0
+pkg_jamdb_sybase_commit = master
 
 PACKAGES += jerg
 pkg_jerg_name = jerg
@@ -2056,9 +2081,9 @@ pkg_jerg_commit = master
 PACKAGES += jesse
 pkg_jesse_name = jesse
 pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
-pkg_jesse_homepage = https://github.com/klarna/jesse
+pkg_jesse_homepage = https://github.com/for-GET/jesse
 pkg_jesse_fetch = git
-pkg_jesse_repo = https://github.com/klarna/jesse
+pkg_jesse_repo = https://github.com/for-GET/jesse
 pkg_jesse_commit = master
 
 PACKAGES += jiffy
@@ -2075,7 +2100,7 @@ pkg_jiffy_v_description = JSON validation utility
 pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
 pkg_jiffy_v_fetch = git
 pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
-pkg_jiffy_v_commit = 0.3.3
+pkg_jiffy_v_commit = master
 
 PACKAGES += jobs
 pkg_jobs_name = jobs
@@ -2083,7 +2108,7 @@ pkg_jobs_description = a Job scheduler for load regulation
 pkg_jobs_homepage = https://github.com/esl/jobs
 pkg_jobs_fetch = git
 pkg_jobs_repo = https://github.com/esl/jobs
-pkg_jobs_commit = 0.3
+pkg_jobs_commit = master
 
 PACKAGES += joxa
 pkg_joxa_name = joxa
@@ -2109,6 +2134,14 @@ pkg_json_rec_fetch = git
 pkg_json_rec_repo = https://github.com/justinkirby/json_rec
 pkg_json_rec_commit = master
 
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
 PACKAGES += jsonerl
 pkg_jsonerl_name = jsonerl
 pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
@@ -2149,6 +2182,14 @@ pkg_kafka_fetch = git
 pkg_kafka_repo = https://github.com/wooga/kafka-erlang
 pkg_kafka_commit = master
 
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
 PACKAGES += kai
 pkg_kai_name = kai
 pkg_kai_description = DHT storage by Takeshi Inoue
@@ -2291,7 +2332,7 @@ pkg_lasse_description = SSE handler for Cowboy
 pkg_lasse_homepage = https://github.com/inaka/lasse
 pkg_lasse_fetch = git
 pkg_lasse_repo = https://github.com/inaka/lasse
-pkg_lasse_commit = 0.1.0
+pkg_lasse_commit = master
 
 PACKAGES += ldap
 pkg_ldap_name = ldap
@@ -2501,6 +2542,14 @@ pkg_merl_fetch = git
 pkg_merl_repo = https://github.com/richcarl/merl
 pkg_merl_commit = master
 
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
 PACKAGES += mimetypes
 pkg_mimetypes_name = mimetypes
 pkg_mimetypes_description = Erlang MIME types library
@@ -2733,21 +2782,13 @@ pkg_oauth2_fetch = git
 pkg_oauth2_repo = https://github.com/kivra/oauth2
 pkg_oauth2_commit = master
 
-PACKAGES += oauth2c
-pkg_oauth2c_name = oauth2c
-pkg_oauth2c_description = Erlang OAuth2 Client
-pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
-pkg_oauth2c_fetch = git
-pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
-pkg_oauth2c_commit = master
-
 PACKAGES += octopus
 pkg_octopus_name = octopus
 pkg_octopus_description = Small and flexible pool manager written in Erlang
 pkg_octopus_homepage = https://github.com/erlangbureau/octopus
 pkg_octopus_fetch = git
 pkg_octopus_repo = https://github.com/erlangbureau/octopus
-pkg_octopus_commit = 1.0.0
+pkg_octopus_commit = master
 
 PACKAGES += of_protocol
 pkg_of_protocol_name = of_protocol
@@ -2819,7 +2860,7 @@ pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
 pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
 pkg_pegjs_fetch = git
 pkg_pegjs_repo = https://github.com/dmitriid/pegjs
-pkg_pegjs_commit = 0.3
+pkg_pegjs_commit = master
 
 PACKAGES += percept2
 pkg_percept2_name = percept2
@@ -2987,7 +3028,7 @@ pkg_qdate_description = Date, time, and timezone parsing, formatting, and conver
 pkg_qdate_homepage = https://github.com/choptastic/qdate
 pkg_qdate_fetch = git
 pkg_qdate_repo = https://github.com/choptastic/qdate
-pkg_qdate_commit = 0.4.0
+pkg_qdate_commit = master
 
 PACKAGES += qrcode
 pkg_qrcode_name = qrcode
@@ -3059,7 +3100,7 @@ pkg_ranch_description = Socket acceptor pool for TCP protocols.
 pkg_ranch_homepage = http://ninenines.eu
 pkg_ranch_fetch = git
 pkg_ranch_repo = https://github.com/ninenines/ranch
-pkg_ranch_commit = 1.1.0
+pkg_ranch_commit = 1.2.1
 
 PACKAGES += rbeacon
 pkg_rbeacon_name = rbeacon
@@ -3099,7 +3140,7 @@ pkg_recon_description = Collection of functions and scripts to debug Erlang in p
 pkg_recon_homepage = https://github.com/ferd/recon
 pkg_recon_fetch = git
 pkg_recon_repo = https://github.com/ferd/recon
-pkg_recon_commit = 2.2.1
+pkg_recon_commit = master
 
 PACKAGES += record_info
 pkg_record_info_name = record_info
@@ -3293,6 +3334,14 @@ pkg_rlimit_fetch = git
 pkg_rlimit_repo = https://github.com/jlouis/rlimit
 pkg_rlimit_commit = master
 
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
 PACKAGES += safetyvalve
 pkg_safetyvalve_name = safetyvalve
 pkg_safetyvalve_description = A safety valve for your erlang node
@@ -3363,7 +3412,7 @@ pkg_shotgun_description = better than just a gun
 pkg_shotgun_homepage = https://github.com/inaka/shotgun
 pkg_shotgun_fetch = git
 pkg_shotgun_repo = https://github.com/inaka/shotgun
-pkg_shotgun_commit = 0.1.0
+pkg_shotgun_commit = master
 
 PACKAGES += sidejob
 pkg_sidejob_name = sidejob
@@ -3421,6 +3470,14 @@ pkg_skel_fetch = git
 pkg_skel_repo = https://github.com/ParaPhrase/skel
 pkg_skel_commit = master
 
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
 PACKAGES += smother
 pkg_smother_name = smother
 pkg_smother_description = Extended code coverage metrics for Erlang.
@@ -3533,6 +3590,14 @@ pkg_stripe_fetch = git
 pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
 pkg_stripe_commit = v1
 
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
 PACKAGES += surrogate
 pkg_surrogate_name = surrogate
 pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
@@ -3567,7 +3632,7 @@ pkg_switchboard_commit = master
 
 PACKAGES += syn
 pkg_syn_name = syn
-pkg_syn_description = A global process registry for Erlang.
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
 pkg_syn_homepage = https://github.com/ostinelli/syn
 pkg_syn_fetch = git
 pkg_syn_repo = https://github.com/ostinelli/syn
@@ -3739,7 +3804,7 @@ pkg_unicorn_description = Generic configuration server
 pkg_unicorn_homepage = https://github.com/shizzard/unicorn
 pkg_unicorn_fetch = git
 pkg_unicorn_repo = https://github.com/shizzard/unicorn
-pkg_unicorn_commit = 0.3.0
+pkg_unicorn_commit = master
 
 PACKAGES += unsplit
 pkg_unsplit_name = unsplit
@@ -3755,7 +3820,7 @@ pkg_uuid_description = Erlang UUID Implementation
 pkg_uuid_homepage = https://github.com/okeuday/uuid
 pkg_uuid_fetch = git
 pkg_uuid_repo = https://github.com/okeuday/uuid
-pkg_uuid_commit = v1.4.0
+pkg_uuid_commit = master
 
 PACKAGES += ux
 pkg_ux_name = ux
@@ -3875,7 +3940,7 @@ pkg_worker_pool_description = a simple erlang worker pool
 pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
 pkg_worker_pool_fetch = git
 pkg_worker_pool_repo = https://github.com/inaka/worker_pool
-pkg_worker_pool_commit = 1.0.3
+pkg_worker_pool_commit = master
 
 PACKAGES += wrangler
 pkg_wrangler_name = wrangler
@@ -3907,7 +3972,7 @@ pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
 pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
 pkg_xref_runner_fetch = git
 pkg_xref_runner_repo = https://github.com/inaka/xref_runner
-pkg_xref_runner_commit = 0.2.0
+pkg_xref_runner_commit = master
 
 PACKAGES += yamerl
 pkg_yamerl_name = yamerl
@@ -3941,13 +4006,21 @@ pkg_zab_engine_fetch = git
 pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
 pkg_zab_engine_commit = master
 
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
 PACKAGES += zeta
 pkg_zeta_name = zeta
 pkg_zeta_description = HTTP access log parser in Erlang
 pkg_zeta_homepage = https://github.com/s1n4/zeta
 pkg_zeta_fetch = git
 pkg_zeta_repo = https://github.com/s1n4/zeta
-pkg_zeta_commit =  
+pkg_zeta_commit = master
 
 PACKAGES += zippers
 pkg_zippers_name = zippers
@@ -4063,6 +4136,9 @@ deps::
 else
 deps:: $(ALL_DEPS_DIRS)
 ifndef IS_APP
+       $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+               mkdir -p $$dep/ebin; \
+       done
        $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
                $(MAKE) -C $$dep IS_APP=1 || exit $$?; \
        done
@@ -4092,7 +4168,10 @@ endif
 # While Makefile file could be GNUmakefile or makefile,
 # in practice only Makefile is needed so far.
 define dep_autopatch
-       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+               $(call dep_autopatch_erlang_mk,$(1)); \
+       elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
                if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
                        $(call dep_autopatch2,$(1)); \
                elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
@@ -4100,12 +4179,7 @@ define dep_autopatch
                elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
                        $(call dep_autopatch2,$(1)); \
                else \
-                       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
-                               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-                               $(call dep_autopatch_erlang_mk,$(1)); \
-                       else \
-                               $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
-                       fi \
+                       $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
                fi \
        else \
                if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
@@ -4117,8 +4191,11 @@ define dep_autopatch
 endef
 
 define dep_autopatch2
+       if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+       fi; \
        $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-       if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
                $(call dep_autopatch_fetch_rebar); \
                $(call dep_autopatch_rebar,$(1)); \
        else \
@@ -4256,57 +4333,6 @@ define dep_autopatch_rebar.erl
                                Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
                end
        end(),
-       FindFirst = fun(F, Fd) ->
-               case io:parse_erl_form(Fd, undefined) of
-                       {ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
-                               [PT, F(F, Fd)];
-                       {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
-                               case proplists:get_value(parse_transform, CompileOpts) of
-                                       undefined -> [F(F, Fd)];
-                                       PT -> [PT, F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, include, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ ->
-                                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
-                                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                                       _ -> [F(F, Fd)]
-                                               end
-                               end;
-                       {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
-                               {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
-                               [F(F, HrlFd), F(F, Fd)];
-                       {ok, {attribute, _, include_lib, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, import, {Imp, _}}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
-                                       {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {eof, _} ->
-                               file:close(Fd),
-                               [];
-                       _ ->
-                               F(F, Fd)
-               end
-       end,
-       fun() ->
-               ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
-               First0 = lists:usort(lists:flatten([begin
-                       {ok, Fd} = file:open(F, [read]),
-                       FindFirst(FindFirst, Fd)
-               end || F <- ErlFiles])),
-               First = lists:flatten([begin
-                       {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
-                       FindFirst(FindFirst, Fd)
-               end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
-               Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
-                       lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
-       end(),
        Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
        Write("\npreprocess::\n"),
        Write("\npre-deps::\n"),
@@ -4374,9 +4400,9 @@ define dep_autopatch_rebar.erl
                [] -> ok;
                _ ->
                        Write("\npre-app::\n\t$$\(MAKE) -f c_src/Makefile.erlang.mk\n"),
-                       PortSpecWrite(io_lib:format("ERL_CFLAGS = -finline-functions -Wall -fPIC -I ~s/erts-~s/include -I ~s\n",
+                       PortSpecWrite(io_lib:format("ERL_CFLAGS = -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
                                [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
-                       PortSpecWrite(io_lib:format("ERL_LDFLAGS = -L ~s -lerl_interface -lei\n",
+                       PortSpecWrite(io_lib:format("ERL_LDFLAGS = -L \\"~s\\" -lerl_interface -lei\n",
                                [code:lib_dir(erl_interface, lib)])),
                        [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
                        FilterEnv = fun(Env) ->
@@ -4419,9 +4445,10 @@ define dep_autopatch_rebar.erl
                                        Output, ": $$\(foreach ext,.c .C .cc .cpp,",
                                                "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
                                        "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
-                                       case filename:extension(Output) of
-                                               [] -> "\n";
-                                               _ -> " -shared\n"
+                                       case {filename:extension(Output), $(PLATFORM)} of
+                                           {[], _} -> "\n";
+                                           {_, darwin} -> "\n";
+                                           _ -> " -shared\n"
                                        end])
                        end,
                        [PortSpec(S) || S <- PortSpecs]
@@ -4490,6 +4517,15 @@ define dep_autopatch_app.erl
        halt()
 endef
 
+define dep_autopatch_appsrc_script.erl
+       AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+       AppSrcScript = AppSrc ++ ".script",
+       Bindings = erl_eval:new_bindings(),
+       {ok, Conf} = file:script(AppSrcScript, Bindings),
+       ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+       halt()
+endef
+
 define dep_autopatch_appsrc.erl
        AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
        AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
@@ -4576,10 +4612,11 @@ $(DEPS_DIR)/$(call dep_name,$1):
                exit 17; \
        fi
        $(verbose) mkdir -p $(DEPS_DIR)
-       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
-       $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
-               echo " AUTO  " $(DEP_STR); \
-               cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+       $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+                       && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+               echo " AUTO  " $(1); \
+               cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
        fi
        - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
                echo " CONF  " $(DEP_STR); \
@@ -4664,6 +4701,7 @@ $(foreach p,$(DEP_PLUGINS),\
 DTL_FULL_PATH ?=
 DTL_PATH ?= templates/
 DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
 
 # Verbosity.
 
@@ -4672,28 +4710,10 @@ dtl_verbose = $(dtl_verbose_$(V))
 
 # Core targets.
 
-define erlydtl_compile.erl
-       [begin
-               Module0 = case "$(strip $(DTL_FULL_PATH))" of
-                       "" ->
-                               filename:basename(F, ".dtl");
-                       _ ->
-                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
-                               re:replace(F2, "/",  "_",  [{return, list}, global])
-               end,
-               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
-               case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
-                       ok -> ok;
-                       {ok, _} -> ok
-               end
-       end || F <- string:tokens("$(1)", " ")],
-       halt().
-endef
-
-ifneq ($(wildcard src/),)
-
 DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
 
+ifneq ($(DTL_FILES),)
+
 ifdef DTL_FULL_PATH
 BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
 else
@@ -4701,7 +4721,7 @@ BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES
 endif
 
 ifneq ($(words $(DTL_FILES)),0)
-# Rebuild everything when the Makefile changes.
+# Rebuild templates when the Makefile changes.
 $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
        @mkdir -p $(ERLANG_MK_TMP)
        @if test -f $@; then \
@@ -4712,9 +4732,28 @@ $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
 ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
 endif
 
-ebin/$(PROJECT).app:: $(DTL_FILES)
+define erlydtl_compile.erl
+       [begin
+               Module0 = case "$(strip $(DTL_FULL_PATH))" of
+                       "" ->
+                               filename:basename(F, ".dtl");
+                       _ ->
+                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+                               re:replace(F2, "/",  "_",  [{return, list}, global])
+               end,
+               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+               case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+                       ok -> ok;
+                       {ok, _} -> ok
+               end
+       end || F <- string:tokens("$(1)", " ")],
+       halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
        $(if $(strip $?),\
-               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?),-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/))
+
 endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
@@ -4810,7 +4849,7 @@ app:: clean deps $(PROJECT).d
        $(verbose) $(MAKE) --no-print-directory app-build
 endif
 
-ifeq ($(wildcard src/$(PROJECT)_app.erl),)
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
 define app_file
 {application, $(PROJECT), [
        {description, "$(PROJECT_DESCRIPTION)"},
@@ -4830,7 +4869,7 @@ define app_file
        {modules, [$(call comma_list,$(2))]},
        {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
        {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]},
-       {mod, {$(PROJECT)_app, []}}
+       {mod, {$(PROJECT_MOD), []}}
 ]}.
 endef
 endif
@@ -4888,51 +4927,79 @@ $(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
 # Erlang and Core Erlang files.
 
 define makedep.erl
+       E = ets:new(makedep, [bag]),
+       G = digraph:new([acyclic]),
        ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
-       Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
-       Add = fun (Dep, Acc) ->
-               case lists:keyfind(atom_to_list(Dep), 1, Modules) of
-                       {_, DepFile} -> [DepFile|Acc];
-                       false -> Acc
+       Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+       Add = fun (Mod, Dep) ->
+               case lists:keyfind(Dep, 1, Modules) of
+                       false -> ok;
+                       {_, DepFile} ->
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile}),
+                               digraph:add_vertex(G, Mod),
+                               digraph:add_vertex(G, Dep),
+                               digraph:add_edge(G, Mod, Dep)
                end
        end,
-       AddHd = fun (Dep, Acc) ->
-               case {Dep, lists:keymember(Dep, 2, Modules)} of
-                       {"src/" ++ _, false} -> [Dep|Acc];
-                       {"include/" ++ _, false} -> [Dep|Acc];
-                       _ -> Acc
+       AddHd = fun (F, Mod, DepFile) ->
+               case file:open(DepFile, [read]) of
+                       {error, enoent} -> ok;
+                       {ok, Fd} ->
+                               F(F, Fd, Mod),
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile})
                end
        end,
-       CompileFirst = fun (Deps) ->
-               First0 = [case filename:extension(D) of
-                       ".erl" -> filename:basename(D, ".erl");
-                       _ -> []
-               end || D <- Deps],
-               case lists:usort(First0) of
-                       [] -> [];
-                       [[]] -> [];
-                       First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
-               end
+       Attr = fun
+               (F, Mod, behavior, Dep) -> Add(Mod, Dep);
+               (F, Mod, behaviour, Dep) -> Add(Mod, Dep);
+               (F, Mod, compile, {parse_transform, Dep}) -> Add(Mod, Dep);
+               (F, Mod, compile, Opts) when is_list(Opts) ->
+                       case proplists:get_value(parse_transform, Opts) of
+                               undefined -> ok;
+                               Dep -> Add(Mod, Dep)
+                       end;
+               (F, Mod, include, Hrl) ->
+                       case filelib:is_file("include/" ++ Hrl) of
+                               true -> AddHd(F, Mod, "include/" ++ Hrl);
+                               false ->
+                                       case filelib:is_file("src/" ++ Hrl) of
+                                               true -> AddHd(F, Mod, "src/" ++ Hrl);
+                                               false -> false
+                                       end
+                       end;
+               (F, Mod, include_lib, "$1/include/" ++ Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, include_lib, Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, import, {Imp, _}) ->
+                       case filelib:is_file("src/" ++ atom_to_list(Imp) ++ ".erl") of
+                               false -> ok;
+                               true -> Add(Mod, Imp)
+                       end;
+               (_, _, _, _) -> ok
        end,
-       Depend = [begin
-               case epp:parse_file(F, ["include/"], []) of
-                       {ok, Forms} ->
-                               Deps = lists:usort(lists:foldl(fun
-                                       ({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
-                                       (_, Acc) -> Acc
-                               end, [], Forms)),
-                               case Deps of
-                                       [] -> "";
-                                       _ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
-                               end;
-                       {error, enoent} ->
-                               []
+       MakeDepend = fun(F, Fd, Mod) ->
+               case io:parse_erl_form(Fd, undefined) of
+                       {ok, {attribute, _, Key, Value}, _} ->
+                               Attr(F, Mod, Key, Value),
+                               F(F, Fd, Mod);
+                       {eof, _} ->
+                               file:close(Fd);
+                       _ ->
+                               F(F, Fd, Mod)
                end
+       end,
+       [begin
+               Mod = list_to_atom(filename:basename(F, ".erl")),
+               {ok, Fd} = file:open(F, [read]),
+               MakeDepend(MakeDepend, Fd, Mod)
        end || F <- ErlFiles],
-       ok = file:write_file("$(1)", Depend),
+       Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+       CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+       ok = file:write_file("$(1)", [
+               [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+               "\nCOMPILE_FIRST +=", [[" ", atom_to_list(CF)] || CF <- CompileFirst], "\n"
+       ]),
        halt()
 endef
 
@@ -4977,13 +5044,13 @@ ifeq ($(wildcard src/$(PROJECT).app.src),)
        $(app_verbose) printf "$(subst $(newline),\n,$(subst ",\",$(call app_file,$(GITDESCRIBE),$(MODULES))))" \
                > ebin/$(PROJECT).app
 else
-       $(verbose) if [ -z "$$(grep -E '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+       $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
                echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk README for instructions." >&2; \
                exit 1; \
        fi
        $(appsrc_verbose) cat src/$(PROJECT).app.src \
                | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
-               | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(GITDESCRIBE)\"}/" \
+               | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
                > ebin/$(PROJECT).app
 endif
 
@@ -5069,6 +5136,11 @@ test-dir:
                $(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
 endif
 
+ifeq ($(wildcard src),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps
+       $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+else
 ifeq ($(wildcard ebin/test),)
 test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
 test-build:: clean deps test-deps $(PROJECT).d
@@ -5086,6 +5158,7 @@ clean-test-dir:
 ifneq ($(wildcard $(TEST_DIR)/*.beam),)
        $(gen_verbose) rm -f $(TEST_DIR)/*.beam
 endif
+endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5095,7 +5168,7 @@ endif
 # We strip out -Werror because we don't want to fail due to
 # warnings when used as a dependency.
 
-compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/')
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
 
 define compat_convert_erlc_opts
 $(if $(filter-out -Werror,$1),\
@@ -5103,11 +5176,18 @@ $(if $(filter-out -Werror,$1),\
                $(shell echo $1 | cut -b 2-)))
 endef
 
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
 define compat_rebar_config
-{deps, [$(call comma_list,$(foreach d,$(DEPS),\
-       {$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
-{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
-       $(call compat_convert_erlc_opts,$o)))]}.
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+       $(if $(filter hex,$(call dep_fetch,$d)),\
+               {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+               {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
 endef
 
 $(eval _compat_rebar_config = $$(compat_rebar_config))
@@ -5126,12 +5206,12 @@ MAN_SECTIONS ?= 3 7
 
 docs:: asciidoc
 
-asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+asciidoc: asciidoc-guide asciidoc-manual
 
 ifeq ($(wildcard doc/src/guide/book.asciidoc),)
 asciidoc-guide:
 else
-asciidoc-guide:
+asciidoc-guide: distclean-asciidoc doc-deps
        a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
        a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
 endif
@@ -5139,7 +5219,7 @@ endif
 ifeq ($(wildcard doc/src/manual/*.asciidoc),)
 asciidoc-manual:
 else
-asciidoc-manual:
+asciidoc-manual: distclean-asciidoc doc-deps
        for f in doc/src/manual/*.asciidoc ; do \
                a2x -v -f manpage $$f ; \
        done
@@ -5154,7 +5234,7 @@ install-docs:: install-asciidoc
 install-asciidoc: asciidoc-manual
        for s in $(MAN_SECTIONS); do \
                mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
-               install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+               install -g `id -u` -o `id -g` -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
        done
 endif
 
@@ -5176,8 +5256,8 @@ help::
                "  bootstrap          Generate a skeleton of an OTP application" \
                "  bootstrap-lib      Generate a skeleton of an OTP library" \
                "  bootstrap-rel      Generate the files needed to build a release" \
-               "  new-app n=NAME     Create a new local OTP application NAME" \
-               "  new-lib n=NAME     Create a new local OTP library NAME" \
+               "  new-app in=NAME    Create a new local OTP application NAME" \
+               "  new-lib in=NAME    Create a new local OTP library NAME" \
                "  new t=TPL n=NAME   Generate a module NAME based on the template TPL" \
                "  new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
                "  list-templates     List available templates"
@@ -5214,6 +5294,8 @@ define bs_appsrc_lib
 ]}.
 endef
 
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
 ifdef SP
 define bs_Makefile
 PROJECT = $p
@@ -5223,17 +5305,21 @@ PROJECT_VERSION = 0.0.1
 # Whitespace to be used when creating files from templates.
 SP = $(SP)
 
-include erlang.mk
 endef
 else
 define bs_Makefile
 PROJECT = $p
-include erlang.mk
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 endef
 endif
 
 define bs_apps_Makefile
 PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
 endef
 
@@ -5331,6 +5417,11 @@ code_change(_OldVsn, State, _Extra) ->
        {ok, State}.
 endef
 
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
 define tpl_cowboy_http
 -module($(n)).
 -behaviour(cowboy_http_handler).
@@ -5527,6 +5618,7 @@ endif
        $(eval p := $(PROJECT))
        $(eval n := $(PROJECT)_sup)
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc,src/$(PROJECT).app.src)
@@ -5540,6 +5632,7 @@ ifneq ($(wildcard src/),)
 endif
        $(eval p := $(PROJECT))
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
@@ -5620,12 +5713,33 @@ list-templates:
 
 C_SRC_DIR ?= $(CURDIR)/c_src
 C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
-C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
 C_SRC_TYPE ?= shared
 
 # System type and C compiler/flags.
 
-ifeq ($(PLATFORM),darwin)
+ifeq ($(PLATFORM),msys2)
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+       CC = /mingw64/bin/gcc
+       export CC
+       CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+       CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
        CC ?= cc
        CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
        CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
@@ -5640,10 +5754,15 @@ else ifeq ($(PLATFORM),linux)
        CXXFLAGS ?= -O3 -finline-functions -Wall
 endif
 
-CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
-CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+ifneq ($(PLATFORM),msys2)
+       CFLAGS += -fPIC
+       CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
 
-LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lerl_interface -lei
 
 # Verbosity.
 
@@ -5680,15 +5799,15 @@ OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
 COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
 COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
 
-app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-$(C_SRC_OUTPUT): $(OBJECTS)
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
        $(verbose) mkdir -p priv/
        $(link_verbose) $(CC) $(OBJECTS) \
                $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
-               -o $(C_SRC_OUTPUT)
+               -o $(C_SRC_OUTPUT_FILE)
 
 %.o: %.c
        $(COMPILE_C) $(OUTPUT_OPTION) $<
@@ -5705,13 +5824,13 @@ $(C_SRC_OUTPUT): $(OBJECTS)
 clean:: clean-c_src
 
 clean-c_src:
-       $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+       $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
 
 endif
 
 ifneq ($(wildcard $(C_SRC_DIR)),)
 $(C_SRC_ENV):
-       $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+       $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
                io_lib:format( \
                        \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
                        \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
@@ -5889,7 +6008,7 @@ endif
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
-.PHONY: ct distclean-ct
+.PHONY: ct apps-ct distclean-ct
 
 # Configuration.
 
@@ -5919,22 +6038,44 @@ help::
 CT_RUN = ct_run \
        -no_auto_compile \
        -noinput \
-       -pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(TEST_DIR) \
+       -pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin $(TEST_DIR) \
        -dir $(TEST_DIR) \
        -logdir $(CURDIR)/logs
 
 ifeq ($(CT_SUITES),)
-ct:
+ct: $(if $(IS_APP),,apps-ct)
 else
-ct: test-build
+ct: test-build $(if $(IS_APP),,apps-ct)
        $(verbose) mkdir -p $(CURDIR)/logs/
-       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+       $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1:
+       $(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: test-build $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifndef t
+CT_EXTRA =
+else
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
 endif
 
 define ct_suite_target
 ct-$(1): test-build
        $(verbose) mkdir -p $(CURDIR)/logs/
-       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS)
+       $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
 endef
 
 $(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
@@ -5953,9 +6094,8 @@ DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
 export DIALYZER_PLT
 
 PLT_APPS ?=
-DIALYZER_DIRS ?= --src -r src
-DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
-       -Wunmatched_returns # -Wunderspecs
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
 
 # Core targets.
 
@@ -5971,6 +6111,18 @@ help::
 
 # Plugin-specific targets.
 
+define filter_opts.erl
+       Opts = binary:split(<<"$1">>, <<"-">>, [global]),
+       Filtered = lists:reverse(lists:foldl(fun
+               (O = <<"pa ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"D ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"I ", _/bits>>, Acc) -> [O|Acc];
+               (_, Acc) -> Acc
+       end, [], Opts)),
+       io:format("~s~n", [[["-", O] || O <- Filtered]]),
+       halt().
+endef
+
 $(DIALYZER_PLT): deps app
        $(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
 
@@ -5984,7 +6136,7 @@ dialyze:
 else
 dialyze: $(DIALYZER_PLT)
 endif
-       $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+       $(verbose) dialyzer --no_native `$(call erlang,$(call filter_opts.erl,$(ERLC_OPTS)))` $(DIALYZER_DIRS) $(DIALYZER_OPTS)
 
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5997,58 +6149,20 @@ EDOC_OPTS ?=
 
 # Core targets.
 
-docs:: distclean-edoc edoc
+ifneq ($(wildcard doc/overview.edoc),)
+docs:: edoc
+endif
 
 distclean:: distclean-edoc
 
 # Plugin-specific targets.
 
-edoc: doc-deps
+edoc: distclean-edoc doc-deps
        $(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().'
 
 distclean-edoc:
        $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info
 
-# Copyright (c) 2015, Erlang Solutions Ltd.
-# This file is part of erlang.mk and subject to the terms of the ISC License.
-
-.PHONY: elvis distclean-elvis
-
-# Configuration.
-
-ELVIS_CONFIG ?= $(CURDIR)/elvis.config
-
-ELVIS ?= $(CURDIR)/elvis
-export ELVIS
-
-ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
-ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
-ELVIS_OPTS ?=
-
-# Core targets.
-
-help::
-       $(verbose) printf "%s\n" "" \
-               "Elvis targets:" \
-               "  elvis       Run Elvis using the local elvis.config or download the default otherwise"
-
-distclean:: distclean-elvis
-
-# Plugin-specific targets.
-
-$(ELVIS):
-       $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
-       $(verbose) chmod +x $(ELVIS)
-
-$(ELVIS_CONFIG):
-       $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
-
-elvis: $(ELVIS) $(ELVIS_CONFIG)
-       $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
-
-distclean-elvis:
-       $(gen_verbose) rm -rf $(ELVIS)
-
 # Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
@@ -6057,6 +6171,8 @@ distclean-elvis:
 # Configuration.
 
 ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
 ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
 
 ESCRIPT_BEAMS ?= "ebin/*", "deps/*/ebin/*"
@@ -6102,7 +6218,7 @@ define ESCRIPT_RAW
 '  ]),'\
 '  file:change_mode(Escript, 8#755)'\
 'end,'\
-'Ez("$(ESCRIPT_NAME)"),'\
+'Ez("$(ESCRIPT_FILE)"),'\
 'halt().'
 endef
 
@@ -6114,6 +6230,75 @@ escript:: distclean-escript deps app
 distclean-escript:
        $(gen_verbose) rm -f $(ESCRIPT_NAME)
 
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "EUnit targets:" \
+               "  eunit       Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+       case "$(COVER)" of
+               "" -> ok;
+               _ ->
+                       case cover:compile_beam_directory("ebin") of
+                               {error, _} -> halt(1);
+                               _ -> ok
+                       end
+       end,
+       case eunit:test($1, [$(EUNIT_OPTS)]) of
+               ok -> ok;
+               error -> halt(2)
+       end,
+       case "$(COVER)" of
+               "" -> ok;
+               _ ->
+                       cover:export("eunit.coverdata")
+       end,
+       halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+       $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP),,apps-eunit)
+       $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit:
+       $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; done
+endif
+endif
+
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
@@ -6124,7 +6309,7 @@ distclean-escript:
 RELX ?= $(CURDIR)/relx
 RELX_CONFIG ?= $(CURDIR)/relx.config
 
-RELX_URL ?= https://github.com/erlware/relx/releases/download/v3.5.0/relx
+RELX_URL ?= https://github.com/erlware/relx/releases/download/v3.19.0/relx
 RELX_OPTS ?=
 RELX_OUTPUT_DIR ?= _rel
 
@@ -6392,7 +6577,8 @@ define cover_report.erl
                true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
        TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
        TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
-       TotalPerc = round(100 * TotalY / (TotalY + TotalN)),
+       Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+       TotalPerc = Perc(TotalY, TotalN),
        {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
        io:format(F, "<!DOCTYPE html><html>~n"
                "<head><meta charset=\"UTF-8\">~n"
@@ -6402,7 +6588,7 @@ define cover_report.erl
        io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
        [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
                "<td>~p%</td></tr>~n",
-               [M, M, round(100 * Y / (Y + N))]) || {M, {Y, N}} <- Report1],
+               [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
        How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
        Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
        io:format(F, "</table>~n"
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index ba6da91a5feb637d2c141bb0b8b0d56fd921e861..ccb2719118f03b4a51830d77ce1077c373aaf83d 100644 (file)
@@ -40,7 +40,7 @@ start_child(X) ->
     case mirrored_supervisor:start_child(
            ?SUPERVISOR,
            {id(X), {rabbit_federation_link_sup, start_link, [X]},
-            transient, ?MAX_WAIT, supervisor,
+            transient, ?SUPERVISOR_WAIT, supervisor,
             [rabbit_federation_link_sup]}) of
         {ok, _Pid}             -> ok;
         %% A link returned {stop, gone}, the link_sup shut down, that's OK.
index 8345eb540763349a1c93abbf4c3b2b7db06f4769..402fe30d570097ad6c831a9e2513f3320b7025fc 100644 (file)
@@ -100,12 +100,12 @@ specs(XorQ) ->
 
 spec(U = #upstream{reconnect_delay = Delay}, #exchange{name = XName}) ->
     {U, {rabbit_federation_exchange_link, start_link, [{U, XName}]},
-     {permanent, Delay}, ?MAX_WAIT, worker,
+     {permanent, Delay}, ?WORKER_WAIT, worker,
      [rabbit_federation_link]};
 
 spec(Upstream = #upstream{reconnect_delay = Delay}, Q = #amqqueue{}) ->
     {Upstream, {rabbit_federation_queue_link, start_link, [{Upstream, Q}]},
-     {permanent, Delay}, ?MAX_WAIT, worker,
+     {permanent, Delay}, ?WORKER_WAIT, worker,
      [rabbit_federation_queue_link]}.
 
 name(#exchange{name = XName}) -> XName;
index 3dba50f756d220ce4aed13d33d35a63167694335..f5b6a342dc3910946110d7c33ba52a1216da9514 100644 (file)
@@ -38,7 +38,7 @@ start_child(Q) ->
     case supervisor2:start_child(
            ?SUPERVISOR,
            {id(Q), {rabbit_federation_link_sup, start_link, [Q]},
-            transient, ?MAX_WAIT, supervisor,
+            transient, ?SUPERVISOR_WAIT, supervisor,
             [rabbit_federation_link_sup]}) of
         {ok, _Pid}             -> ok;
         %% A link returned {stop, gone}, the link_sup shut down, that's OK.
index cfcaeb4634477da3c9c4705fa0529402f4320a97..f19e8c7aa58eb1d4347f5454471a834842d77295 100644 (file)
@@ -55,14 +55,14 @@ stop() ->
 
 init([]) ->
     Status = {status, {rabbit_federation_status, start_link, []},
-              transient, ?MAX_WAIT, worker,
+              transient, ?WORKER_WAIT, worker,
               [rabbit_federation_status]},
     XLinkSupSup = {x_links,
                    {rabbit_federation_exchange_link_sup_sup, start_link, []},
-                   transient, ?MAX_WAIT, supervisor,
+                   transient, ?SUPERVISOR_WAIT, supervisor,
                    [rabbit_federation_exchange_link_sup_sup]},
     QLinkSupSup = {q_links,
                    {rabbit_federation_queue_link_sup_sup, start_link, []},
-                  transient, ?MAX_WAIT, supervisor,
+                  transient, ?SUPERVISOR_WAIT, supervisor,
                   [rabbit_federation_queue_link_sup_sup]},
     {ok, {{one_for_one, 3, 10}, [Status, XLinkSupSup, QLinkSupSup]}}.
index d6223d68ff7637360d88b8874367003df55fbce5..3e00500b0aebb80dffa34d38b636e5e342289270 100644 (file)
@@ -71,10 +71,7 @@ remove_credentials(URI) ->
     list_to_binary(amqp_uri:remove_credentials(binary_to_list(URI))).
 
 to_params(Upstream = #upstream{uris = URIs}, XorQ) ->
-    random:seed(erlang:phash2([node()]),
-                time_compat:monotonic_time(),
-                time_compat:unique_integer()),
-    URI = lists:nth(random:uniform(length(URIs)), URIs),
+    URI = lists:nth(rand_compat:uniform(length(URIs)), URIs),
     {ok, Params} = amqp_uri:parse(binary_to_list(URI), vhost(XorQ)),
     XorQ1 = with_name(Upstream, vhost(Params), XorQ),
     SafeURI = remove_credentials(URI),
index cae5439423b2b69f460322d7f5933dc47db28ebe..75d409ad86c19ef415b570da4e156f031b42c28b 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_federation,
  [{description, "RabbitMQ Federation"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_federation_app, []}},
diff --git a/rabbitmq-server/deps/rabbitmq_federation/test/exchange_SUITE.erl b/rabbitmq-server/deps/rabbitmq_federation/test/exchange_SUITE.erl
new file mode 100644 (file)
index 0000000..16fe2c9
--- /dev/null
@@ -0,0 +1,885 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(exchange_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-include("rabbit_federation.hrl").
+
+-compile(export_all).
+
+-import(rabbit_federation_test_util,
+        [expect/3, expect_empty/2,
+         set_upstream/4, clear_upstream/3, set_upstream_set/4,
+         set_policy/5, clear_policy/3,
+         set_policy_upstream/5, set_policy_upstreams/4,
+         no_plugins/1]).
+
+-define(UPSTREAM_DOWNSTREAM, [x(<<"upstream">>),
+                              x(<<"fed.downstream">>)]).
+
+all() ->
+    [
+      {group, without_disambiguate},
+      {group, with_disambiguate}
+    ].
+
+groups() ->
+    [
+      {without_disambiguate, [], [
+          {cluster_size_1, [], [
+              simple,
+              multiple_upstreams,
+              multiple_uris,
+              multiple_downstreams,
+              e2e,
+              unbind_on_delete,
+              unbind_on_unbind,
+              unbind_gets_transmitted,
+              no_loop,
+              dynamic_reconfiguration,
+              dynamic_reconfiguration_integrity,
+              federate_unfederate,
+              dynamic_plugin_stop_start
+            ]}
+        ]},
+      {with_disambiguate, [], [
+          {cluster_size_1, [], [
+              binding_recovery
+            ]},
+          {cluster_size_2, [], [
+              user_id,
+              restart_upstream,
+              cycle_detection
+            ]},
+          {cluster_size_3, [], [
+              max_hops,
+              binding_propagation
+            ]},
+
+          {without_plugins, [], [
+              {cluster_size_2, [], [
+                  upstream_has_no_federation
+                ]}
+            ]}
+        ]}
+    ].
+
+suite() ->
+    [{timetrap, {minutes, 5}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(without_disambiguate, Config) ->
+    rabbit_ct_helpers:set_config(Config,
+      {disambiguate_step, []});
+init_per_group(with_disambiguate, Config) ->
+    rabbit_ct_helpers:set_config(Config,
+      {disambiguate_step, [fun rabbit_federation_test_util:disambiguate/1]});
+init_per_group(without_plugins, Config) ->
+    rabbit_ct_helpers:set_config(Config,
+      {broker_with_plugins, [true, false]});
+init_per_group(cluster_size_1 = Group, Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 1}
+      ]),
+    init_per_group1(Group, Config1);
+init_per_group(cluster_size_2 = Group, Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 2}
+      ]),
+    init_per_group1(Group, Config1);
+init_per_group(cluster_size_3 = Group, Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 3}
+      ]),
+    init_per_group1(Group, Config1).
+
+init_per_group1(Group, Config) ->
+    SetupFederation = case Group of
+        cluster_size_1 -> [fun rabbit_federation_test_util:setup_federation/1];
+        cluster_size_2 -> [];
+        cluster_size_3 -> []
+    end,
+    Disambiguate = ?config(disambiguate_step, Config),
+    Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, Suffix},
+        {rmq_nodes_clustered, false}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps() ++
+      SetupFederation ++ Disambiguate).
+
+end_per_group(without_disambiguate, Config) ->
+    Config;
+end_per_group(with_disambiguate, Config) ->
+    Config;
+end_per_group(without_plugins, Config) ->
+    Config;
+end_per_group(_, Config) ->
+    rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+simple(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              Q = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+              await_binding(Config, 0, <<"upstream">>, <<"key">>),
+              publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO">>)
+      end, ?UPSTREAM_DOWNSTREAM).
+
+multiple_upstreams(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              Q = bind_queue(Ch, <<"fed12.downstream">>, <<"key">>),
+              await_binding(Config, 0, <<"upstream">>, <<"key">>),
+              await_binding(Config, 0, <<"upstream2">>, <<"key">>),
+              publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO1">>),
+              publish_expect(Ch, <<"upstream2">>, <<"key">>, Q, <<"HELLO2">>)
+      end, [x(<<"upstream">>),
+            x(<<"upstream2">>),
+            x(<<"fed12.downstream">>)]).
+
+multiple_uris(Config) ->
+    %% We can't use a direct connection for Kill() to work.
+    URIs = [
+      rabbit_ct_broker_helpers:node_uri(Config, 0),
+      rabbit_ct_broker_helpers:node_uri(Config, 0, [use_ipaddr])
+    ],
+    set_upstream(Config, 0, <<"localhost">>, URIs),
+    WithCh = fun(F) ->
+                     Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+                     F(Ch),
+                     rabbit_ct_client_helpers:close_channels_and_connection(
+                       Config, 0)
+             end,
+    WithCh(fun (Ch) -> declare_all(Ch, ?UPSTREAM_DOWNSTREAM) end),
+    expect_uris(Config, 0, URIs),
+    WithCh(fun (Ch) -> delete_all(Ch, ?UPSTREAM_DOWNSTREAM) end),
+    %% Put back how it was
+    rabbit_federation_test_util:setup_federation(Config),
+    ok.
+
+expect_uris(_, _, []) ->
+    ok;
+expect_uris(Config, Node, URIs) ->
+    [Link] = rabbit_ct_broker_helpers:rpc(Config, Node,
+      rabbit_federation_status, status, []),
+    URI = rabbit_misc:pget(uri, Link),
+    kill_only_connection(Config, Node),
+    expect_uris(Config, Node, URIs -- [URI]).
+
+kill_only_connection(Config, Node) ->
+    case connection_pids(Config, Node) of
+        [Pid] -> catch rabbit_ct_broker_helpers:rpc(Config, Node,
+                   rabbit_networking, close_connection, [Pid, "boom"]), %% [1]
+                 wait_for_pid_to_die(Config, Node, Pid);
+        _     -> timer:sleep(100),
+                 kill_only_connection(Config, Node)
+    end.
+
+%% [1] the catch is because we could still see a connection from a
+%% previous time round. If so that's fine (we'll just loop around
+%% again) but we don't want the test to fail because a connection
+%% closed as we were trying to close it.
+
+wait_for_pid_to_die(Config, Node, Pid) ->
+    case connection_pids(Config, Node) of
+        [Pid] -> timer:sleep(100),
+                 wait_for_pid_to_die(Config, Node, Pid);
+        _     -> ok
+    end.
+
+
+multiple_downstreams(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+              Q12 = bind_queue(Ch, <<"fed12.downstream2">>, <<"key">>),
+              await_binding(Config, 0, <<"upstream">>, <<"key">>, 2),
+              await_binding(Config, 0, <<"upstream2">>, <<"key">>),
+              publish(Ch, <<"upstream">>, <<"key">>, <<"HELLO1">>),
+              publish(Ch, <<"upstream2">>, <<"key">>, <<"HELLO2">>),
+              expect(Ch, Q1, [<<"HELLO1">>]),
+              expect(Ch, Q12, [<<"HELLO1">>, <<"HELLO2">>])
+      end, ?UPSTREAM_DOWNSTREAM ++
+          [x(<<"upstream2">>),
+           x(<<"fed12.downstream2">>)]).
+
+e2e(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              bind_exchange(Ch, <<"downstream2">>, <<"fed.downstream">>,
+                            <<"key">>),
+              await_binding(Config, 0, <<"upstream">>, <<"key">>),
+              Q = bind_queue(Ch, <<"downstream2">>, <<"key">>),
+              publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO1">>)
+      end, ?UPSTREAM_DOWNSTREAM ++ [x(<<"downstream2">>)]).
+
+unbind_on_delete(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+              Q2 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+              await_binding(Config, 0, <<"upstream">>, <<"key">>),
+              delete_queue(Ch, Q2),
+              publish_expect(Ch, <<"upstream">>, <<"key">>, Q1, <<"HELLO">>)
+      end, ?UPSTREAM_DOWNSTREAM).
+
+unbind_on_unbind(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+              Q2 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
+              await_binding(Config, 0, <<"upstream">>, <<"key">>),
+              unbind_queue(Ch, Q2, <<"fed.downstream">>, <<"key">>),
+              publish_expect(Ch, <<"upstream">>, <<"key">>, Q1, <<"HELLO">>),
+              delete_queue(Ch, Q2)
+      end, ?UPSTREAM_DOWNSTREAM).
+
+user_id(Config) ->
+    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    set_policy_upstream(Config, Rabbit, <<"^test$">>,
+      rabbit_ct_broker_helpers:node_uri(Config, 1), []),
+    Perm = fun (F, A) ->
+                  ok = rpc:call(Hare,
+                                rabbit_auth_backend_internal, F, A)
+           end,
+    Perm(add_user, [<<"hare-user">>, <<"hare-user">>]),
+    Perm(set_permissions, [<<"hare-user">>,
+                           <<"/">>, <<".*">>, <<".*">>, <<".*">>]),
+
+    Ch = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+    {ok, Conn2} = amqp_connection:start(
+      #amqp_params_network{
+        username = <<"hare-user">>,
+        password = <<"hare-user">>,
+        port     = rabbit_ct_broker_helpers:get_node_config(Config, Hare,
+          tcp_port_amqp)}),
+    {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+    declare_exchange(Ch2, x(<<"test">>)),
+    declare_exchange(Ch, x(<<"test">>)),
+    Q = bind_queue(Ch, <<"test">>, <<"key">>),
+    await_binding(Config, Hare, <<"test">>, <<"key">>),
+
+    Msg = #amqp_msg{props   = #'P_basic'{user_id = <<"hare-user">>},
+                    payload = <<"HELLO">>},
+
+    SafeUri = fun (H) ->
+                      {array, [{table, Recv}]} =
+                          rabbit_misc:table_lookup(
+                            H, <<"x-received-from">>),
+                      URI = rabbit_ct_broker_helpers:node_uri(Config, 1),
+                      {longstr, URI} =
+                         rabbit_misc:table_lookup(Recv, <<"uri">>)
+              end,
+    ExpectUser =
+        fun (ExpUser) ->
+                fun () ->
+                        receive
+                            {#'basic.deliver'{},
+                             #amqp_msg{props   = Props,
+                                       payload = Payload}} ->
+                                #'P_basic'{user_id = ActUser,
+                                           headers = Headers} = Props,
+                                SafeUri(Headers),
+                                <<"HELLO">> = Payload,
+                                ExpUser = ActUser
+                        end
+                end
+        end,
+
+    publish(Ch2, <<"test">>, <<"key">>, Msg),
+    expect(Ch, Q, ExpectUser(undefined)),
+
+    set_policy_upstream(Config, Rabbit, <<"^test$">>,
+      rabbit_ct_broker_helpers:node_uri(Config, 1),
+      [{<<"trust-user-id">>, true}]),
+
+    publish(Ch2, <<"test">>, <<"key">>, Msg),
+    expect(Ch, Q, ExpectUser(<<"hare-user">>)),
+
+    amqp_channel:close(Ch2),
+    amqp_connection:close(Conn2),
+
+    ok.
+
+%% In order to test that unbinds get sent we deliberately set up a
+%% broken config - with topic upstream and fanout downstream. You
+%% shouldn't really do this, but it lets us see "extra" messages that
+%% get sent.
+unbind_gets_transmitted(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              Q11 = bind_queue(Ch, <<"fed.downstream">>, <<"key1">>),
+              Q12 = bind_queue(Ch, <<"fed.downstream">>, <<"key1">>),
+              Q21 = bind_queue(Ch, <<"fed.downstream">>, <<"key2">>),
+              Q22 = bind_queue(Ch, <<"fed.downstream">>, <<"key2">>),
+              await_binding(Config, 0, <<"upstream">>, <<"key1">>),
+              await_binding(Config, 0, <<"upstream">>, <<"key2">>),
+              [delete_queue(Ch, Q) || Q <- [Q12, Q21, Q22]],
+              publish(Ch, <<"upstream">>, <<"key1">>, <<"YES">>),
+              publish(Ch, <<"upstream">>, <<"key2">>, <<"NO">>),
+              expect(Ch, Q11, [<<"YES">>]),
+              expect_empty(Ch, Q11)
+      end, [x(<<"upstream">>),
+            x(<<"fed.downstream">>)]).
+
+no_loop(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              Q1 = bind_queue(Ch, <<"one">>, <<"key">>),
+              Q2 = bind_queue(Ch, <<"two">>, <<"key">>),
+              await_binding(Config, 0, <<"one">>, <<"key">>, 2),
+              await_binding(Config, 0, <<"two">>, <<"key">>, 2),
+              publish(Ch, <<"one">>, <<"key">>, <<"Hello from one">>),
+              publish(Ch, <<"two">>, <<"key">>, <<"Hello from two">>),
+              expect(Ch, Q1, [<<"Hello from one">>, <<"Hello from two">>]),
+              expect(Ch, Q2, [<<"Hello from one">>, <<"Hello from two">>]),
+              expect_empty(Ch, Q1),
+              expect_empty(Ch, Q2)
+      end, [x(<<"one">>),
+            x(<<"two">>)]).
+
+binding_recovery(Config) ->
+    [Rabbit] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    Q = <<"durable-Q">>,
+    Ch = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+
+    rabbit_federation_test_util:set_upstream(Config,
+      Rabbit, <<"rabbit">>,
+      rabbit_ct_broker_helpers:node_uri(Config, Rabbit)),
+    rabbit_federation_test_util:set_upstream_set(Config,
+      Rabbit, <<"upstream">>,
+      [{<<"rabbit">>, [{<<"exchange">>, <<"upstream">>}]},
+       {<<"rabbit">>, [{<<"exchange">>, <<"upstream2">>}]}]),
+    rabbit_federation_test_util:set_policy(Config,
+      Rabbit, <<"fed">>, <<"^fed\\.">>, <<"upstream">>),
+
+    declare_all(Ch, [x(<<"upstream2">>) | ?UPSTREAM_DOWNSTREAM]),
+    #'queue.declare_ok'{} =
+        amqp_channel:call(Ch, #'queue.declare'{queue   = Q,
+                                               durable = true}),
+    bind_queue(Ch, Q, <<"fed.downstream">>, <<"key">>),
+    timer:sleep(100), %% To get the suffix written
+
+    %% i.e. don't clean up
+    rabbit_ct_client_helpers:close_channels_and_connection(Config, Rabbit),
+    rabbit_ct_broker_helpers:restart_node(Config, Rabbit),
+
+    true = (none =/= suffix(Config, Rabbit, <<"rabbit">>, "upstream")),
+    true = (none =/= suffix(Config, Rabbit, <<"rabbit">>, "upstream2")),
+
+    %% again don't clean up
+    rabbit_ct_broker_helpers:restart_node(Config, Rabbit),
+    Ch3 = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+
+    rabbit_ct_broker_helpers:set_parameter(Config,
+      Rabbit, <<"federation-upstream-set">>, <<"upstream">>,
+      [[{<<"upstream">>, <<"rabbit">>}, {<<"exchange">>, <<"upstream">>}]]),
+
+    publish_expect(Ch3, <<"upstream">>, <<"key">>, Q, <<"HELLO">>),
+    true = (none =/= suffix(Config, Rabbit, <<"rabbit">>, "upstream")),
+    none = suffix(Config, Rabbit, <<"rabbit">>, "upstream2"),
+    delete_all(Ch3, [x(<<"upstream2">>) | ?UPSTREAM_DOWNSTREAM]),
+    delete_queue(Ch3, Q),
+    ok.
+
+suffix(Config, Node, Name, XName) ->
+    rabbit_ct_broker_helpers:rpc(Config, Node,
+      rabbit_federation_db, get_active_suffix,
+             [r(<<"fed.downstream">>),
+              #upstream{name          = Name,
+                        exchange_name = list_to_binary(XName)}, none]).
+
+restart_upstream(Config) ->
+    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    Downstream = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+    Upstream   = rabbit_ct_client_helpers:open_channel(Config, Hare),
+
+    rabbit_federation_test_util:set_upstream(Config,
+      Rabbit, <<"hare">>, rabbit_ct_broker_helpers:node_uri(Config, 1)),
+    rabbit_federation_test_util:set_upstream_set(Config,
+      Rabbit, <<"upstream">>,
+      [{<<"hare">>, [{<<"exchange">>, <<"upstream">>}]}]),
+    rabbit_federation_test_util:set_policy(Config,
+      Rabbit, <<"hare">>, <<"^hare\\.">>, <<"upstream">>),
+
+    declare_exchange(Upstream, x(<<"upstream">>)),
+    declare_exchange(Downstream, x(<<"hare.downstream">>)),
+
+    Qstays = bind_queue(Downstream, <<"hare.downstream">>, <<"stays">>),
+    Qgoes = bind_queue(Downstream, <<"hare.downstream">>, <<"goes">>),
+
+    rabbit_ct_client_helpers:close_channels_and_connection(Config, Hare),
+    rabbit_ct_broker_helpers:stop_node(Config, Hare),
+
+    Qcomes = bind_queue(Downstream, <<"hare.downstream">>, <<"comes">>),
+    unbind_queue(Downstream, Qgoes, <<"hare.downstream">>, <<"goes">>),
+
+    rabbit_ct_broker_helpers:start_node(Config, Hare),
+    Upstream1 = rabbit_ct_client_helpers:open_channel(Config, Hare),
+
+    %% Wait for the link to come up and for these bindings
+    %% to be transferred
+    await_binding(Config, Hare, <<"upstream">>, <<"comes">>, 1),
+    await_binding_absent(Config, Hare, <<"upstream">>, <<"goes">>),
+    await_binding(Config, Hare, <<"upstream">>, <<"stays">>, 1),
+
+    publish(Upstream1, <<"upstream">>, <<"goes">>, <<"GOES">>),
+    publish(Upstream1, <<"upstream">>, <<"stays">>, <<"STAYS">>),
+    publish(Upstream1, <<"upstream">>, <<"comes">>, <<"COMES">>),
+
+    expect(Downstream, Qstays, [<<"STAYS">>]),
+    expect(Downstream, Qcomes, [<<"COMES">>]),
+    expect_empty(Downstream, Qgoes),
+
+    delete_exchange(Downstream, <<"hare.downstream">>),
+    delete_exchange(Upstream1, <<"upstream">>),
+    ok.
+
+%% flopsy, mopsy and cottontail, connected in a ring with max_hops = 2
+%% for each connection. We should not see any duplicates.
+
+max_hops(Config) ->
+    [Flopsy, Mopsy, Cottontail] = rabbit_ct_broker_helpers:get_node_configs(
+      Config, nodename),
+    [set_policy_upstream(Config, Downstream,
+       <<"^ring$">>,
+       rabbit_ct_broker_helpers:node_uri(Config, Upstream),
+       [{<<"max-hops">>, 2}])
+     || {Downstream, Upstream} <- [{Flopsy, Cottontail},
+                                    {Mopsy, Flopsy},
+                                    {Cottontail, Mopsy}]],
+
+    FlopsyCh     = rabbit_ct_client_helpers:open_channel(Config, Flopsy),
+    MopsyCh      = rabbit_ct_client_helpers:open_channel(Config, Mopsy),
+    CottontailCh = rabbit_ct_client_helpers:open_channel(Config, Cottontail),
+
+    declare_exchange(FlopsyCh,     x(<<"ring">>)),
+    declare_exchange(MopsyCh,      x(<<"ring">>)),
+    declare_exchange(CottontailCh, x(<<"ring">>)),
+
+    Q1 = bind_queue(FlopsyCh,     <<"ring">>, <<"key">>),
+    Q2 = bind_queue(MopsyCh,      <<"ring">>, <<"key">>),
+    Q3 = bind_queue(CottontailCh, <<"ring">>, <<"key">>),
+
+    await_binding(Config, Flopsy,     <<"ring">>, <<"key">>, 3),
+    await_binding(Config, Mopsy,      <<"ring">>, <<"key">>, 3),
+    await_binding(Config, Cottontail, <<"ring">>, <<"key">>, 3),
+
+    publish(FlopsyCh,     <<"ring">>, <<"key">>, <<"HELLO flopsy">>),
+    publish(MopsyCh,      <<"ring">>, <<"key">>, <<"HELLO mopsy">>),
+    publish(CottontailCh, <<"ring">>, <<"key">>, <<"HELLO cottontail">>),
+
+    Msgs = [<<"HELLO flopsy">>, <<"HELLO mopsy">>, <<"HELLO cottontail">>],
+    expect(FlopsyCh,     Q1, Msgs),
+    expect(MopsyCh,      Q2, Msgs),
+    expect(CottontailCh, Q3, Msgs),
+    expect_empty(FlopsyCh,     Q1),
+    expect_empty(MopsyCh,      Q2),
+    expect_empty(CottontailCh, Q3),
+    ok.
+
+%% Two nodes, both federated with each other, and max_hops set to a
+%% high value. Things should not get out of hand.
+cycle_detection(Config) ->
+    [Cycle1, Cycle2] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    [set_policy_upstream(Config, Downstream,
+       <<"^cycle$">>,
+       rabbit_ct_broker_helpers:node_uri(Config, Upstream),
+       [{<<"max-hops">>, 10}])
+     || {Downstream, Upstream} <- [{Cycle1, Cycle2}, {Cycle2, Cycle1}]],
+
+    Cycle1Ch = rabbit_ct_client_helpers:open_channel(Config, Cycle1),
+    Cycle2Ch = rabbit_ct_client_helpers:open_channel(Config, Cycle2),
+
+    declare_exchange(Cycle1Ch, x(<<"cycle">>)),
+    declare_exchange(Cycle2Ch, x(<<"cycle">>)),
+
+    Q1 = bind_queue(Cycle1Ch, <<"cycle">>, <<"key">>),
+    Q2 = bind_queue(Cycle2Ch, <<"cycle">>, <<"key">>),
+
+    %% "key" appears twice: once for the local queue binding and once
+    %% for the federation link's binding, in each case
+    await_binding(Config, Cycle1, <<"cycle">>, <<"key">>, 2),
+    await_binding(Config, Cycle2, <<"cycle">>, <<"key">>, 2),
+
+    publish(Cycle1Ch, <<"cycle">>, <<"key">>, <<"HELLO1">>),
+    publish(Cycle2Ch, <<"cycle">>, <<"key">>, <<"HELLO2">>),
+
+    Msgs = [<<"HELLO1">>, <<"HELLO2">>],
+    expect(Cycle1Ch, Q1, Msgs),
+    expect(Cycle2Ch, Q2, Msgs),
+    expect_empty(Cycle1Ch, Q1),
+    expect_empty(Cycle2Ch, Q2),
+
+    ok.
+
+%% Arrows indicate message flow. Numbers indicate max_hops.
+%%
+%% Dylan ---1--> Bugs ---2--> Jessica
+%% |^                              |^
+%% |\--------------1---------------/|
+%% \---------------1----------------/
+%%
+%%
+%% We want to demonstrate that if we bind a queue locally at each
+%% broker, (exactly) the following bindings propagate:
+%%
+%% Bugs binds to Dylan
+%% Jessica binds to Bugs, which then propagates on to Dylan
+%% Jessica binds to Dylan directly
+%% Dylan binds to Jessica.
+%%
+%% i.e. Dylan has two bindings from Jessica and one from Bugs
+%%      Bugs has one binding from Jessica
+%%      Jessica has one binding from Dylan
+%%
+%% So we tag each binding with its original broker and see how far it gets
+%%
+%% Also we check that when we tear down the original bindings
+%% we get rid of everything again.
+
+binding_propagation(Config) ->
+    [Dylan, Bugs, Jessica] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    set_policy_upstream(Config, Dylan, <<"^x$">>,
+      rabbit_ct_broker_helpers:node_uri(Config, Jessica), []),
+    set_policy_upstream(Config, Bugs, <<"^x$">>,
+      rabbit_ct_broker_helpers:node_uri(Config, Dylan), []),
+    set_policy_upstreams(Config, Jessica, <<"^x$">>, [
+        {rabbit_ct_broker_helpers:node_uri(Config, Dylan), []},
+        {rabbit_ct_broker_helpers:node_uri(Config, Bugs),
+          [{<<"max-hops">>, 2}]}
+      ]),
+    DylanCh   = rabbit_ct_client_helpers:open_channel(Config, Dylan),
+    BugsCh    = rabbit_ct_client_helpers:open_channel(Config, Bugs),
+    JessicaCh = rabbit_ct_client_helpers:open_channel(Config, Jessica),
+
+    declare_exchange(DylanCh,   x(<<"x">>)),
+    declare_exchange(BugsCh,    x(<<"x">>)),
+    declare_exchange(JessicaCh, x(<<"x">>)),
+
+    Q1 = bind_queue(DylanCh,   <<"x">>, <<"dylan">>),
+    Q2 = bind_queue(BugsCh,    <<"x">>, <<"bugs">>),
+    Q3 = bind_queue(JessicaCh, <<"x">>, <<"jessica">>),
+
+    await_binding(Config,  Dylan,   <<"x">>, <<"jessica">>, 2),
+    await_bindings(Config, Dylan,   <<"x">>, [<<"bugs">>, <<"dylan">>]),
+    await_bindings(Config, Bugs,    <<"x">>, [<<"jessica">>, <<"bugs">>]),
+    await_bindings(Config, Jessica, <<"x">>, [<<"dylan">>, <<"jessica">>]),
+
+    delete_queue(DylanCh,   Q1),
+    delete_queue(BugsCh,    Q2),
+    delete_queue(JessicaCh, Q3),
+
+    await_bindings(Config, Dylan,   <<"x">>, []),
+    await_bindings(Config, Bugs,    <<"x">>, []),
+    await_bindings(Config, Jessica, <<"x">>, []),
+
+    ok.
+
+upstream_has_no_federation(Config) ->
+    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    set_policy_upstream(Config, Rabbit, <<"^test$">>,
+      rabbit_ct_broker_helpers:node_uri(Config, Hare), []),
+    Downstream = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+    Upstream   = rabbit_ct_client_helpers:open_channel(Config, Hare),
+    declare_exchange(Upstream, x(<<"test">>)),
+    declare_exchange(Downstream, x(<<"test">>)),
+    Q = bind_queue(Downstream, <<"test">>, <<"routing">>),
+    await_binding(Config, Hare, <<"test">>, <<"routing">>),
+    publish(Upstream, <<"test">>, <<"routing">>, <<"HELLO">>),
+    expect(Downstream, Q, [<<"HELLO">>]),
+    ok.
+
+dynamic_reconfiguration(Config) ->
+    with_ch(Config,
+      fun (_Ch) ->
+              Xs = [<<"all.fed1">>, <<"all.fed2">>],
+              %% Left over from the configuration set up for previous tests
+              assert_connections(Config, 0, Xs, [<<"localhost">>, <<"local5673">>]),
+
+              %% Test that clearing connections works
+              clear_upstream(Config, 0, <<"localhost">>),
+              clear_upstream(Config, 0, <<"local5673">>),
+              assert_connections(Config, 0, Xs, []),
+
+              %% Test that re-adding them and changing them works
+              set_upstream(Config, 0,
+                <<"localhost">>, rabbit_ct_broker_helpers:node_uri(Config, 0)),
+              %% Do it twice so we at least hit the no-restart optimisation
+              URI = rabbit_ct_broker_helpers:node_uri(Config, 0, [use_ipaddr]),
+              set_upstream(Config, 0, <<"localhost">>, URI),
+              set_upstream(Config, 0, <<"localhost">>, URI),
+              assert_connections(Config, 0, Xs, [<<"localhost">>]),
+
+              %% And re-add the last - for next test
+              rabbit_federation_test_util:setup_federation(Config)
+      end, [x(<<"all.fed1">>), x(<<"all.fed2">>)]).
+
+dynamic_reconfiguration_integrity(Config) ->
+    with_ch(Config,
+      fun (_Ch) ->
+              Xs = [<<"new.fed1">>, <<"new.fed2">>],
+
+              %% Declared exchanges with nonexistent set - no links
+              assert_connections(Config, 0, Xs, []),
+
+              %% Create the set - links appear
+              set_upstream_set(Config, 0, <<"new-set">>, [{<<"localhost">>, []}]),
+              assert_connections(Config, 0, Xs, [<<"localhost">>]),
+
+              %% Add nonexistent connections to set - nothing breaks
+              set_upstream_set(Config, 0,
+                <<"new-set">>, [{<<"localhost">>, []},
+                                {<<"does-not-exist">>, []}]),
+              assert_connections(Config, 0, Xs, [<<"localhost">>]),
+
+              %% Change connection in set - links change
+              set_upstream_set(Config, 0, <<"new-set">>, [{<<"local5673">>, []}]),
+              assert_connections(Config, 0, Xs, [<<"local5673">>])
+      end, [x(<<"new.fed1">>), x(<<"new.fed2">>)]).
+
+federate_unfederate(Config) ->
+    with_ch(Config,
+      fun (_Ch) ->
+              Xs = [<<"dyn.exch1">>, <<"dyn.exch2">>],
+
+              %% Declared non-federated exchanges - no links
+              assert_connections(Config, 0, Xs, []),
+
+              %% Federate them - links appear
+              set_policy(Config, 0, <<"dyn">>, <<"^dyn\\.">>, <<"all">>),
+              assert_connections(Config, 0, Xs, [<<"localhost">>, <<"local5673">>]),
+
+              %% Change policy - links change
+              set_policy(Config, 0, <<"dyn">>, <<"^dyn\\.">>, <<"localhost">>),
+              assert_connections(Config, 0, Xs, [<<"localhost">>]),
+
+              %% Unfederate them - links disappear
+              clear_policy(Config, 0, <<"dyn">>),
+              assert_connections(Config, 0, Xs, [])
+      end, [x(<<"dyn.exch1">>), x(<<"dyn.exch2">>)]).
+
+dynamic_plugin_stop_start(Config) ->
+    X1 = <<"dyn.exch1">>,
+    X2 = <<"dyn.exch2">>,
+    with_ch(Config,
+      fun (Ch) ->
+              set_policy(Config, 0, <<"dyn">>, <<"^dyn\\.">>, <<"localhost">>),
+
+              %% Declare federated exchange - get link
+              assert_connections(Config, 0, [X1], [<<"localhost">>]),
+
+              %% Disable plugin, link goes
+              ok = rabbit_ct_broker_helpers:disable_plugin(Config, 0,
+                "rabbitmq_federation"),
+              %% We can't check via federation status because the plugin is disabled
+              undefined = rabbit_ct_broker_helpers:rpc(Config, 0,
+                erlang, whereis, [rabbit_federation_sup]),
+              {error, not_found} = rabbit_ct_broker_helpers:rpc(Config, 0,
+                rabbit_registry, lookup_module,
+                [exchange, 'x-federation-upstream']),
+
+              %% Create exchange then re-enable plugin, links appear
+              declare_exchange(Ch, x(X2)),
+              ok = rabbit_ct_broker_helpers:enable_plugin(Config, 0,
+                "rabbitmq_federation"),
+              assert_connections(Config, 0, [X1, X2], [<<"localhost">>]),
+              {ok, _} = rabbit_ct_broker_helpers:rpc(Config, 0,
+                rabbit_registry, lookup_module,
+                [exchange, 'x-federation-upstream']),
+
+              %% Test both exchanges work. They are just federated to
+              %% themselves so should duplicate messages.
+              [begin
+                   Q = bind_queue(Ch, X, <<"key">>),
+                   await_binding(Config, 0, X, <<"key">>, 2),
+                   publish(Ch, X, <<"key">>, <<"HELLO">>),
+                   expect(Ch, Q, [<<"HELLO">>, <<"HELLO">>]),
+                   delete_queue(Ch, Q)
+               end || X <- [X1, X2]],
+
+              clear_policy(Config, 0, <<"dyn">>),
+              assert_connections(Config, 0, [X1, X2], [])
+      end, [x(X1)]).
+
+%%----------------------------------------------------------------------------
+
+with_ch(Config, Fun, Xs) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    declare_all(Ch, Xs),
+    rabbit_federation_test_util:assert_status(Config, 0,
+      Xs, {exchange, upstream_exchange}),
+    Fun(Ch),
+    delete_all(Ch, Xs),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    cleanup(Config, 0),
+    ok.
+
+cleanup(Config, Node) ->
+    [rabbit_ct_broker_helpers:rpc(Config, Node,
+        rabbit_amqqueue, delete, [Q, false, false]) ||
+      Q <- queues(Config, Node)].
+
+queues(Config, Node) ->
+    Ret = rabbit_ct_broker_helpers:rpc(Config, Node,
+      rabbit_amqqueue, list, [<<"/">>]),
+    case Ret of
+        {badrpc, _} -> [];
+        Qs          -> Qs
+    end.
+
+stop_other_node(Config, Node) ->
+    cleanup(Config, Node),
+    rabbit_federation_test_util:stop_other_node(Config, Node).
+
+declare_all(Ch, Xs) -> [declare_exchange(Ch, X) || X <- Xs].
+delete_all(Ch, Xs) ->
+    [delete_exchange(Ch, X) || #'exchange.declare'{exchange = X} <- Xs].
+
+declare_exchange(Ch, X) ->
+    amqp_channel:call(Ch, X).
+
+x(Name) -> x(Name, <<"topic">>).
+
+x(Name, Type) ->
+    #'exchange.declare'{exchange = Name,
+                        type     = Type,
+                        durable  = true}.
+
+r(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
+
+declare_queue(Ch) ->
+    #'queue.declare_ok'{queue = Q} =
+        amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+    Q.
+
+bind_queue(Ch, Q, X, Key) ->
+    amqp_channel:call(Ch, #'queue.bind'{queue       = Q,
+                                        exchange    = X,
+                                        routing_key = Key}).
+
+unbind_queue(Ch, Q, X, Key) ->
+    amqp_channel:call(Ch, #'queue.unbind'{queue       = Q,
+                                          exchange    = X,
+                                          routing_key = Key}).
+
+bind_exchange(Ch, D, S, Key) ->
+    amqp_channel:call(Ch, #'exchange.bind'{destination = D,
+                                           source      = S,
+                                           routing_key = Key}).
+
+bind_queue(Ch, X, Key) ->
+    Q = declare_queue(Ch),
+    bind_queue(Ch, Q, X, Key),
+    Q.
+
+delete_exchange(Ch, X) ->
+    amqp_channel:call(Ch, #'exchange.delete'{exchange = X}).
+
+delete_queue(Ch, Q) ->
+    amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+await_binding(Config, Node, X, Key) ->
+    await_binding(Config, Node, X, Key, 1).
+
+await_binding(Config, Node, X, Key, Count) ->
+    case bound_keys_from(Config, Node, X, Key) of
+        L when length(L) <   Count -> timer:sleep(100),
+                                      await_binding(Config, Node, X, Key, Count);
+        L when length(L) =:= Count -> ok;
+        L                          -> exit({too_many_bindings,
+                                            X, Key, Count, L})
+    end.
+
+await_bindings(Config, Node, X, Keys) ->
+    [await_binding(Config, Node, X, Key) || Key <- Keys].
+
+await_binding_absent(Config, Node, X, Key) ->
+    case bound_keys_from(Config, Node, X, Key) of
+        [] -> ok;
+        _  -> timer:sleep(100),
+              await_binding_absent(Config, Node, X, Key)
+    end.
+
+bound_keys_from(Config, Node, X, Key) ->
+    List = rabbit_ct_broker_helpers:rpc(Config, Node,
+      rabbit_binding, list_for_source, [r(X)]),
+    [K || #binding{key = K} <- List, K =:= Key].
+
+publish(Ch, X, Key, Payload) when is_binary(Payload) ->
+    publish(Ch, X, Key, #amqp_msg{payload = Payload});
+
+publish(Ch, X, Key, Msg = #amqp_msg{}) ->
+    amqp_channel:call(Ch, #'basic.publish'{exchange    = X,
+                                           routing_key = Key}, Msg).
+
+publish_expect(Ch, X, Key, Q, Payload) ->
+    publish(Ch, X, Key, Payload),
+    expect(Ch, Q, [Payload]).
+
+%%----------------------------------------------------------------------------
+
+assert_connections(Config, Node, Xs, Conns) ->
+    rabbit_ct_broker_helpers:rpc(Config, Node,
+      ?MODULE, assert_connections1, [Xs, Conns]).
+
+assert_connections1(Xs, Conns) ->
+    Links = [{X, C, X} ||
+                X <- Xs,
+                C <- Conns],
+    Remaining = lists:foldl(
+                  fun (Link, Status) ->
+                          rabbit_federation_test_util:assert_link_status(
+                            Link, Status, {exchange, upstream_exchange})
+                  end, rabbit_federation_status:status(), Links),
+    [] = Remaining,
+    ok.
+
+connection_pids(Config, Node) ->
+    [P || [{pid, P}] <-
+              rabbit_ct_broker_helpers:rpc(Config, Node,
+                rabbit_networking, connection_info_all, [[pid]])].
similarity index 54%
rename from rabbitmq-server/deps/rabbitmq_federation/test/src/rabbit_federation_queue_test.erl
rename to rabbitmq-server/deps/rabbitmq_federation/test/queue_SUITE.erl
index 4f3cf5a0bf164dffc47c4bc65ffb942fb9d4a77f..761e5415f092f9e2ed178f4a5ecbcbb87b16ea77 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_federation_queue_test).
+-module(queue_SUITE).
 
--compile(export_all).
--include("rabbit_federation.hrl").
--include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 
--import(rabbit_misc, [pget/2]).
--import(rabbit_federation_util, [name/1]).
--import(rabbit_test_util, [enable_plugin/2, disable_plugin/2]).
+-compile(export_all).
 
 -import(rabbit_federation_test_util,
         [expect/3,
-         set_upstream/3, clear_upstream/2, set_policy/4, clear_policy/2,
-         set_policy_upstream/4, set_policy_upstreams/3,
-         disambiguate/1, single_cfg/0]).
+         set_upstream/4, clear_upstream/3, set_policy/5, clear_policy/3,
+         set_policy_upstream/5, set_policy_upstreams/4]).
 
 -define(UPSTREAM_DOWNSTREAM, [q(<<"upstream">>),
                               q(<<"fed.downstream">>)]).
 
-%% Used in restart_upstream_test
--define(HARE, {"hare", 5673}).
-
-simple_test() ->
-    with_ch(
+all() ->
+    [
+      {group, without_disambiguate},
+      {group, with_disambiguate}
+    ].
+
+groups() ->
+    [
+      {without_disambiguate, [], [
+          {cluster_size_1, [], [
+              simple,
+              multiple_upstreams,
+              multiple_downstreams,
+              bidirectional,
+              dynamic_reconfiguration,
+              federate_unfederate,
+              dynamic_plugin_stop_start
+            ]}
+        ]},
+      {with_disambiguate, [], [
+          {cluster_size_2, [], [
+              restart_upstream
+            ]}
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(without_disambiguate, Config) ->
+    rabbit_ct_helpers:set_config(Config,
+      {disambiguate_step, []});
+init_per_group(with_disambiguate, Config) ->
+    rabbit_ct_helpers:set_config(Config,
+      {disambiguate_step, [fun rabbit_federation_test_util:disambiguate/1]});
+init_per_group(cluster_size_1 = Group, Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 1}
+      ]),
+    init_per_group1(Group, Config1);
+init_per_group(cluster_size_2 = Group, Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 2}
+      ]),
+    init_per_group1(Group, Config1).
+
+init_per_group1(Group, Config) ->
+    SetupFederation = case Group of
+        cluster_size_1 -> [fun rabbit_federation_test_util:setup_federation/1];
+        cluster_size_2 -> []
+    end,
+    Disambiguate = ?config(disambiguate_step, Config),
+    Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, Suffix},
+        {rmq_nodes_clustered, false}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps() ++
+      SetupFederation ++ Disambiguate).
+
+end_per_group(without_disambiguate, Config) ->
+    Config;
+end_per_group(with_disambiguate, Config) ->
+    Config;
+end_per_group(_, Config) ->
+    rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+simple(Config) ->
+    with_ch(Config,
       fun (Ch) ->
               expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>)
       end, [q(<<"upstream">>),
             q(<<"fed.downstream">>)]).
 
-multiple_upstreams_test() ->
-    with_ch(
+multiple_upstreams(Config) ->
+    with_ch(Config,
       fun (Ch) ->
               expect_federation(Ch, <<"upstream">>, <<"fed12.downstream">>),
               expect_federation(Ch, <<"upstream2">>, <<"fed12.downstream">>)
@@ -53,8 +134,8 @@ multiple_upstreams_test() ->
             q(<<"upstream2">>),
             q(<<"fed12.downstream">>)]).
 
-multiple_downstreams_test() ->
-    with_ch(
+multiple_downstreams(Config) ->
+    with_ch(Config,
       fun (Ch) ->
               expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>),
               expect_federation(Ch, <<"upstream">>, <<"fed.downstream2">>)
@@ -62,8 +143,8 @@ multiple_downstreams_test() ->
             q(<<"fed.downstream">>),
             q(<<"fed.downstream2">>)]).
 
-bidirectional_test() ->
-    with_ch(
+bidirectional(Config) ->
+    with_ch(Config,
       fun (Ch) ->
               publish_expect(Ch, <<>>, <<"one">>, <<"one">>, <<"first one">>),
               publish_expect(Ch, <<>>, <<"two">>, <<"two">>, <<"first two">>),
@@ -77,85 +158,87 @@ bidirectional_test() ->
       end, [q(<<"one">>),
             q(<<"two">>)]).
 
-dynamic_reconfiguration_test() ->
-    Cfg = single_cfg(),
-    with_ch(
+dynamic_reconfiguration(Config) ->
+    with_ch(Config,
       fun (Ch) ->
               expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>),
 
               %% Test that clearing connections works
-              clear_upstream(Cfg, <<"localhost">>),
+              clear_upstream(Config, 0, <<"localhost">>),
               expect_no_federation(Ch, <<"upstream">>, <<"fed.downstream">>),
 
               %% Test that readding them and changing them works
-              set_upstream(Cfg, <<"localhost">>, <<"amqp://localhost">>),
+              set_upstream(Config, 0,
+                <<"localhost">>, rabbit_ct_broker_helpers:node_uri(Config, 0)),
               %% Do it twice so we at least hit the no-restart optimisation
-              set_upstream(Cfg, <<"localhost">>, <<"amqp://">>),
-              set_upstream(Cfg, <<"localhost">>, <<"amqp://">>),
+              URI = rabbit_ct_broker_helpers:node_uri(Config, 0, [use_ipaddr]),
+              set_upstream(Config, 0, <<"localhost">>, URI),
+              set_upstream(Config, 0, <<"localhost">>, URI),
               expect_federation(Ch, <<"upstream">>, <<"fed.downstream">>)
       end, [q(<<"upstream">>),
             q(<<"fed.downstream">>)]).
 
-federate_unfederate_test() ->
-    Cfg = single_cfg(),
-    with_ch(
+federate_unfederate(Config) ->
+    with_ch(Config,
       fun (Ch) ->
               expect_no_federation(Ch, <<"upstream">>, <<"downstream">>),
               expect_no_federation(Ch, <<"upstream2">>, <<"downstream">>),
 
               %% Federate it
-              set_policy(Cfg, <<"dyn">>, <<"^downstream\$">>, <<"upstream">>),
+              set_policy(Config, 0, <<"dyn">>,
+                <<"^downstream\$">>, <<"upstream">>),
               expect_federation(Ch, <<"upstream">>, <<"downstream">>),
               expect_no_federation(Ch, <<"upstream2">>, <<"downstream">>),
 
               %% Change policy - upstream changes
-              set_policy(Cfg, <<"dyn">>, <<"^downstream\$">>, <<"upstream2">>),
+              set_policy(Config, 0, <<"dyn">>,
+                <<"^downstream\$">>, <<"upstream2">>),
               expect_no_federation(Ch, <<"upstream">>, <<"downstream">>),
               expect_federation(Ch, <<"upstream2">>, <<"downstream">>),
 
               %% Unfederate it - no federation
-              clear_policy(Cfg, <<"dyn">>),
+              clear_policy(Config, 0, <<"dyn">>),
               expect_no_federation(Ch, <<"upstream2">>, <<"downstream">>)
       end, [q(<<"upstream">>),
             q(<<"upstream2">>),
             q(<<"downstream">>)]).
 
-dynamic_plugin_stop_start_test() ->
-    Cfg = single_cfg(),
+dynamic_plugin_stop_start(Config) ->
     Q1 = <<"dyn.q1">>,
     Q2 = <<"dyn.q2">>,
     U = <<"upstream">>,
-    with_ch(
+    with_ch(Config,
       fun (Ch) ->
-              set_policy(Cfg, <<"dyn">>, <<"^dyn\\.">>, U),
+              set_policy(Config, 0, <<"dyn">>, <<"^dyn\\.">>, U),
               %% Declare federated queue - get link
               expect_federation(Ch, U, Q1),
 
               %% Disable plugin, link goes
-              ok = disable_plugin(Cfg, "rabbitmq_federation"),
+              ok = rabbit_ct_broker_helpers:disable_plugin(Config, 0,
+                "rabbitmq_federation"),
               expect_no_federation(Ch, U, Q1),
 
               %% Create exchange then re-enable plugin, links appear
               declare_queue(Ch, q(Q2)),
-              ok = enable_plugin(Cfg, "rabbitmq_federation"),
+              ok = rabbit_ct_broker_helpers:enable_plugin(Config, 0,
+                "rabbitmq_federation"),
               expect_federation(Ch, U, Q1),
               expect_federation(Ch, U, Q2),
 
-              clear_policy(Cfg, <<"dyn">>),
+              clear_policy(Config, 0, <<"dyn">>),
               expect_no_federation(Ch, U, Q1),
               expect_no_federation(Ch, U, Q2),
               delete_queue(Ch, Q2)
       end, [q(Q1), q(U)]).
 
-%% Downstream: rabbit-test, port 5672
-%% Upstream:   hare,        port 5673
+restart_upstream(Config) ->
+    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    set_policy_upstream(Config, Rabbit, <<"^test$">>,
+      rabbit_ct_broker_helpers:node_uri(Config, Hare), []),
 
-restart_upstream_with() -> disambiguate(start_ab).
-restart_upstream([Rabbit, Hare]) ->
-    set_policy_upstream(Rabbit, <<"^test$">>, <<"amqp://localhost:5673">>, []),
-
-    {_, Downstream} = rabbit_test_util:connect(Rabbit),
-    {_, Upstream}   = rabbit_test_util:connect(Hare),
+    Downstream = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+    Upstream   = rabbit_ct_client_helpers:open_channel(Config, Hare),
 
     declare_queue(Upstream, q(<<"test">>)),
     declare_queue(Downstream, q(<<"test">>)),
@@ -164,8 +247,9 @@ restart_upstream([Rabbit, Hare]) ->
     expect(Upstream, <<"test">>, repeat(25, <<"bulk">>)),
     expect(Downstream, <<"test">>, repeat(25, <<"bulk">>)),
 
-    Hare2 = rabbit_test_configs:restart_node(Hare),
-    {_, Upstream2} = rabbit_test_util:connect(Hare2),
+    rabbit_ct_client_helpers:close_channels_and_connection(Config, Hare),
+    ok = rabbit_ct_broker_helpers:restart_node(Config, Hare),
+    Upstream2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
 
     expect(Upstream2, <<"test">>, repeat(25, <<"bulk">>)),
     expect(Downstream, <<"test">>, repeat(25, <<"bulk">>)),
@@ -174,22 +258,21 @@ restart_upstream([Rabbit, Hare]) ->
 
     ok.
 
-upstream_has_no_federation_test() ->
-    %% TODO
-    ok.
+%upstream_has_no_federation(Config) ->
+%    %% TODO
+%    ok.
 
 %%----------------------------------------------------------------------------
 
-with_ch(Fun, Qs) ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
+with_ch(Config, Fun, Qs) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
     declare_all(Ch, Qs),
     timer:sleep(1000), %% Time for statuses to get updated
-    rabbit_federation_test_util:assert_status(
+    rabbit_federation_test_util:assert_status(Config, 0,
       Qs, {queue, upstream_queue}),
     Fun(Ch),
     delete_all(Ch, Qs),
-    amqp_connection:close(Conn),
+    rabbit_ct_client_helpers:close_channel(Ch),
     ok.
 
 declare_all(Ch, Qs) -> [declare_queue(Ch, Q) || Q <- Qs].
diff --git a/rabbitmq-server/deps/rabbitmq_federation/test/rabbit_federation_test_util.erl b/rabbitmq-server/deps/rabbitmq_federation/test/rabbit_federation_test_util.erl
new file mode 100644 (file)
index 0000000..9b79ff9
--- /dev/null
@@ -0,0 +1,250 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Federation.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_federation_test_util).
+
+-include("rabbit_federation.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-import(rabbit_misc, [pget/2]).
+
+setup_federation(Config) ->
+    rabbit_ct_broker_helpers:set_parameter(Config, 0,
+      <<"federation-upstream">>, <<"localhost">>, [
+        {<<"uri">>, rabbit_ct_broker_helpers:node_uri(Config, 0)}]),
+
+    rabbit_ct_broker_helpers:set_parameter(Config, 0,
+      <<"federation-upstream">>, <<"local5673">>, [
+        {<<"uri">>, <<"amqp://localhost:1">>}]),
+
+    rabbit_ct_broker_helpers:set_parameter(Config, 0,
+      <<"federation-upstream-set">>, <<"upstream">>, [
+        [
+          {<<"upstream">>, <<"localhost">>},
+          {<<"exchange">>, <<"upstream">>},
+          {<<"queue">>, <<"upstream">>}
+        ]
+      ]),
+
+    rabbit_ct_broker_helpers:set_parameter(Config, 0,
+      <<"federation-upstream-set">>, <<"upstream2">>, [
+        [
+          {<<"upstream">>, <<"localhost">>},
+          {<<"exchange">>, <<"upstream2">>},
+          {<<"queue">>, <<"upstream2">>}
+        ]
+      ]),
+
+    rabbit_ct_broker_helpers:set_parameter(Config, 0,
+      <<"federation-upstream-set">>, <<"localhost">>, [
+        [{<<"upstream">>, <<"localhost">>}]
+      ]),
+
+    rabbit_ct_broker_helpers:set_parameter(Config, 0,
+      <<"federation-upstream-set">>, <<"upstream12">>, [
+        [
+          {<<"upstream">>, <<"localhost">>},
+          {<<"exchange">>, <<"upstream">>},
+          {<<"queue">>, <<"upstream">>}
+        ], [
+          {<<"upstream">>, <<"localhost">>},
+          {<<"exchange">>, <<"upstream2">>},
+          {<<"queue">>, <<"upstream2">>}
+        ]
+      ]),
+
+    rabbit_ct_broker_helpers:set_parameter(Config, 0,
+      <<"federation-upstream-set">>, <<"one">>, [
+        [
+          {<<"upstream">>, <<"localhost">>},
+          {<<"exchange">>, <<"one">>},
+          {<<"queue">>, <<"one">>}
+        ]
+      ]),
+
+    rabbit_ct_broker_helpers:set_parameter(Config, 0,
+      <<"federation-upstream-set">>, <<"two">>, [
+        [
+          {<<"upstream">>, <<"localhost">>},
+          {<<"exchange">>, <<"two">>},
+          {<<"queue">>, <<"two">>}
+        ]
+      ]),
+
+    rabbit_ct_broker_helpers:set_parameter(Config, 0,
+      <<"federation-upstream-set">>, <<"upstream5673">>, [
+        [
+          {<<"upstream">>, <<"local5673">>},
+          {<<"exchange">>, <<"upstream">>}
+        ]
+      ]),
+
+    rabbit_ct_broker_helpers:set_policy(Config, 0,
+      <<"fed">>, <<"^fed\\.">>, <<"all">>, [
+        {<<"federation-upstream-set">>, <<"upstream">>}]),
+
+    rabbit_ct_broker_helpers:set_policy(Config, 0,
+      <<"fed12">>, <<"^fed12\\.">>, <<"all">>, [
+        {<<"federation-upstream-set">>, <<"upstream12">>}]),
+
+    rabbit_ct_broker_helpers:set_policy(Config, 0,
+      <<"one">>, <<"^two$">>, <<"all">>, [
+        {<<"federation-upstream-set">>, <<"one">>}]),
+
+    rabbit_ct_broker_helpers:set_policy(Config, 0,
+      <<"two">>, <<"^one$">>, <<"all">>, [
+        {<<"federation-upstream-set">>, <<"two">>}]),
+
+    rabbit_ct_broker_helpers:set_policy(Config, 0,
+      <<"hare">>, <<"^hare\\.">>, <<"all">>, [
+        {<<"federation-upstream-set">>, <<"upstream5673">>}]),
+
+    rabbit_ct_broker_helpers:set_policy(Config, 0,
+      <<"all">>, <<"^all\\.">>, <<"all">>, [
+        {<<"federation-upstream-set">>, <<"all">>}]),
+
+    rabbit_ct_broker_helpers:set_policy(Config, 0,
+      <<"new">>, <<"^new\\.">>, <<"all">>, [
+        {<<"federation-upstream-set">>, <<"new-set">>}]),
+    Config.
+
+expect(Ch, Q, Fun) when is_function(Fun) ->
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue  = Q,
+                                                no_ack = true}, self()),
+    CTag = receive
+        #'basic.consume_ok'{consumer_tag = CT} -> CT
+    end,
+    Fun(),
+    amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag});
+
+expect(Ch, Q, Payloads) ->
+    expect(Ch, Q, fun() -> expect(Payloads) end).
+
+expect([]) ->
+    ok;
+expect(Payloads) ->
+    receive
+        {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+            case lists:member(Payload, Payloads) of
+                true  -> expect(Payloads -- [Payload]);
+                false -> throw({expected, Payloads, actual, Payload})
+            end
+    end.
+
+expect_empty(Ch, Q) ->
+    ?assertMatch(#'basic.get_empty'{},
+                 amqp_channel:call(Ch, #'basic.get'{ queue = Q })).
+
+set_upstream(Config, Node, Name, URI) ->
+    set_upstream(Config, Node, Name, URI, []).
+
+set_upstream(Config, Node, Name, URI, Extra) ->
+    rabbit_ct_broker_helpers:set_parameter(Config, Node,
+      <<"federation-upstream">>, Name, [{<<"uri">>, URI} | Extra]).
+
+clear_upstream(Config, Node, Name) ->
+    rabbit_ct_broker_helpers:clear_parameter(Config, Node,
+      <<"federation-upstream">>, Name).
+
+set_upstream_set(Config, Node, Name, Set) ->
+    rabbit_ct_broker_helpers:set_parameter(Config, Node,
+      <<"federation-upstream-set">>, Name,
+      [[{<<"upstream">>, UStream} | Extra] || {UStream, Extra} <- Set]).
+
+set_policy(Config, Node, Name, Pattern, UpstreamSet) ->
+    rabbit_ct_broker_helpers:set_policy(Config, Node,
+      Name, Pattern, <<"all">>,
+      [{<<"federation-upstream-set">>, UpstreamSet}]).
+
+clear_policy(Config, Node, Name) ->
+    rabbit_ct_broker_helpers:clear_policy(Config, Node, Name).
+
+set_policy_upstream(Config, Node, Pattern, URI, Extra) ->
+    set_policy_upstreams(Config, Node, Pattern, [{URI, Extra}]).
+
+set_policy_upstreams(Config, Node, Pattern, URIExtras) ->
+    put(upstream_num, 1),
+    [set_upstream(Config, Node, gen_upstream_name(), URI, Extra)
+     || {URI, Extra} <- URIExtras],
+    set_policy(Config, Node, Pattern, Pattern, <<"all">>).
+
+gen_upstream_name() ->
+    list_to_binary("upstream-" ++ integer_to_list(next_upstream_num())).
+
+next_upstream_num() ->
+    R = get(upstream_num) + 1,
+    put(upstream_num, R),
+    R.
+
+%% Make sure that even though multiple nodes are in a single
+%% distributed system, we still keep all our process groups separate.
+disambiguate(Config) ->
+    rabbit_ct_broker_helpers:rpc_all(Config,
+      application, set_env,
+      [rabbitmq_federation, pgroup_name_cluster_id, true]),
+    Config.
+
+no_plugins(Cfg) ->
+    [{K, case K of
+             plugins -> none;
+             _       -> V
+         end} || {K, V} <- Cfg].
+
+%%----------------------------------------------------------------------------
+
+assert_status(Config, Node, XorQs, Names) ->
+    rabbit_ct_broker_helpers:rpc(Config, Node,
+      ?MODULE, assert_status1, [XorQs, Names]).
+
+assert_status1(XorQs, Names) ->
+    Links = lists:append([links(XorQ) || XorQ <- XorQs]),
+    Remaining = lists:foldl(fun (Link, Status) ->
+                                    assert_link_status(Link, Status, Names)
+                            end, rabbit_federation_status:status(), Links),
+    ?assertEqual([], Remaining),
+    ok.
+
+assert_link_status({DXorQNameBin, UpstreamName, UXorQNameBin}, Status,
+                   {TypeName, UpstreamTypeName}) ->
+    {This, Rest} = lists:partition(
+                     fun(St) ->
+                             pget(upstream, St) =:= UpstreamName andalso
+                                 pget(TypeName, St) =:= DXorQNameBin andalso
+                                 pget(UpstreamTypeName, St) =:= UXorQNameBin
+                     end, Status),
+    ?assertMatch([_], This),
+    Rest.
+
+links(#'exchange.declare'{exchange = Name}) ->
+    case rabbit_policy:get(<<"federation-upstream-set">>, xr(Name)) of
+        undefined -> [];
+        Set       -> X = #exchange{name = xr(Name)},
+                     [{Name, U#upstream.name, U#upstream.exchange_name} ||
+                         U <- rabbit_federation_upstream:from_set(Set, X)]
+    end;
+links(#'queue.declare'{queue = Name}) ->
+    case rabbit_policy:get(<<"federation-upstream-set">>, qr(Name)) of
+        undefined -> [];
+        Set       -> Q = #amqqueue{name = qr(Name)},
+                     [{Name, U#upstream.name, U#upstream.queue_name} ||
+                         U <- rabbit_federation_upstream:from_set(Set, Q)]
+    end.
+
+xr(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
+qr(Name) -> rabbit_misc:r(<<"/">>, queue, Name).
diff --git a/rabbitmq-server/deps/rabbitmq_federation/test/src/rabbit_federation_exchange_test.erl b/rabbitmq-server/deps/rabbitmq_federation/test/src/rabbit_federation_exchange_test.erl
deleted file mode 100644 (file)
index 60d0966..0000000
+++ /dev/null
@@ -1,744 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ Federation.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_federation_exchange_test).
-
--compile(export_all).
--include("rabbit_federation.hrl").
--include_lib("eunit/include/eunit.hrl").
--include_lib("amqp_client/include/amqp_client.hrl").
-
--import(rabbit_misc, [pget/2]).
--import(rabbit_federation_util, [name/1]).
--import(rabbit_test_util, [enable_plugin/2, disable_plugin/2]).
-
--import(rabbit_federation_test_util,
-        [expect/3, expect_empty/2,
-         set_upstream/3, clear_upstream/2, set_upstream_set/3,
-         set_policy/4, clear_policy/2,
-         set_policy_upstream/4, set_policy_upstreams/3,
-         disambiguate/1, no_plugins/1, single_cfg/0]).
-
--define(UPSTREAM_DOWNSTREAM, [x(<<"upstream">>),
-                              x(<<"fed.downstream">>)]).
-
-simple_test() ->
-    with_ch(
-      fun (Ch) ->
-              Q = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
-              await_binding(<<"upstream">>, <<"key">>),
-              publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO">>)
-      end, ?UPSTREAM_DOWNSTREAM).
-
-multiple_upstreams_test() ->
-    with_ch(
-      fun (Ch) ->
-              Q = bind_queue(Ch, <<"fed12.downstream">>, <<"key">>),
-              await_binding(<<"upstream">>, <<"key">>),
-              await_binding(<<"upstream2">>, <<"key">>),
-              publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO1">>),
-              publish_expect(Ch, <<"upstream2">>, <<"key">>, Q, <<"HELLO2">>)
-      end, [x(<<"upstream">>),
-            x(<<"upstream2">>),
-            x(<<"fed12.downstream">>)]).
-
-multiple_uris_test() ->
-    %% We can't use a direct connection for Kill() to work.
-    set_upstream(single_cfg(), <<"localhost">>,
-                 [<<"amqp://localhost">>, <<"amqp://localhost:5672">>]),
-    WithCh = fun(F) ->
-                     {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-                     {ok, Ch} = amqp_connection:open_channel(Conn),
-                     F(Ch),
-                     amqp_connection:close(Conn)
-             end,
-    WithCh(fun (Ch) -> declare_all(Ch, ?UPSTREAM_DOWNSTREAM) end),
-    expect_uris([<<"amqp://localhost">>, <<"amqp://localhost:5672">>]),
-    WithCh(fun (Ch) -> delete_all(Ch, ?UPSTREAM_DOWNSTREAM) end),
-    %% Put back how it was
-    set_upstream(single_cfg(), <<"localhost">>, <<"amqp://">>).
-
-expect_uris([])   -> ok;
-expect_uris(URIs) -> [Link] = rabbit_federation_status:status(),
-                     URI = pget(uri, Link),
-                     kill_only_connection(n(os:getenv("RABBITMQ_NODENAME"))),
-                     expect_uris(URIs -- [URI]).
-
-kill_only_connection(Node) ->
-    case connection_pids(Node) of
-        [Pid] -> catch rabbit_networking:close_connection(Pid, "boom"), %% [1]
-                 wait_for_pid_to_die(Node, Pid);
-        _     -> timer:sleep(100),
-                 kill_only_connection(Node)
-    end.
-
-%% [1] the catch is because we could still see a connection from a
-%% previous time round. If so that's fine (we'll just loop around
-%% again) but we don't want the test to fail because a connection
-%% closed as we were trying to close it.
-
-wait_for_pid_to_die(Node, Pid) ->
-    case connection_pids(Node) of
-        [Pid] -> timer:sleep(100),
-                 wait_for_pid_to_die(Node, Pid);
-        _     -> ok
-    end.
-
-
-multiple_downstreams_test() ->
-    with_ch(
-      fun (Ch) ->
-              Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
-              Q12 = bind_queue(Ch, <<"fed12.downstream2">>, <<"key">>),
-              await_binding(<<"upstream">>, <<"key">>, 2),
-              await_binding(<<"upstream2">>, <<"key">>),
-              publish(Ch, <<"upstream">>, <<"key">>, <<"HELLO1">>),
-              publish(Ch, <<"upstream2">>, <<"key">>, <<"HELLO2">>),
-              expect(Ch, Q1, [<<"HELLO1">>]),
-              expect(Ch, Q12, [<<"HELLO1">>, <<"HELLO2">>])
-      end, ?UPSTREAM_DOWNSTREAM ++
-          [x(<<"upstream2">>),
-           x(<<"fed12.downstream2">>)]).
-
-e2e_test() ->
-    with_ch(
-      fun (Ch) ->
-              bind_exchange(Ch, <<"downstream2">>, <<"fed.downstream">>,
-                            <<"key">>),
-              await_binding(<<"upstream">>, <<"key">>),
-              Q = bind_queue(Ch, <<"downstream2">>, <<"key">>),
-              publish_expect(Ch, <<"upstream">>, <<"key">>, Q, <<"HELLO1">>)
-      end, ?UPSTREAM_DOWNSTREAM ++ [x(<<"downstream2">>)]).
-
-unbind_on_delete_test() ->
-    with_ch(
-      fun (Ch) ->
-              Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
-              Q2 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
-              await_binding(<<"upstream">>, <<"key">>),
-              delete_queue(Ch, Q2),
-              publish_expect(Ch, <<"upstream">>, <<"key">>, Q1, <<"HELLO">>)
-      end, ?UPSTREAM_DOWNSTREAM).
-
-unbind_on_unbind_test() ->
-    with_ch(
-      fun (Ch) ->
-              Q1 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
-              Q2 = bind_queue(Ch, <<"fed.downstream">>, <<"key">>),
-              await_binding(<<"upstream">>, <<"key">>),
-              unbind_queue(Ch, Q2, <<"fed.downstream">>, <<"key">>),
-              publish_expect(Ch, <<"upstream">>, <<"key">>, Q1, <<"HELLO">>),
-              delete_queue(Ch, Q2)
-      end, ?UPSTREAM_DOWNSTREAM).
-
-user_id_with() -> disambiguate(start_ab).
-user_id([Rabbit, Hare]) ->
-    set_policy_upstream(Rabbit, <<"^test$">>, <<"amqp://localhost:5673">>, []),
-    Perm = fun (F, A) ->
-                  ok = rpc:call(pget(node, Hare),
-                                rabbit_auth_backend_internal, F, A)
-           end,
-    Perm(add_user, [<<"hare-user">>, <<"hare-user">>]),
-    Perm(set_permissions, [<<"hare-user">>,
-                           <<"/">>, <<".*">>, <<".*">>, <<".*">>]),
-
-    {_, Ch} = rabbit_test_util:connect(Rabbit),
-    {ok, Conn2} = amqp_connection:start(
-                    #amqp_params_network{username = <<"hare-user">>,
-                                         password = <<"hare-user">>,
-                                         port     = pget(port, Hare)}),
-    {ok, Ch2} = amqp_connection:open_channel(Conn2),
-
-    declare_exchange(Ch2, x(<<"test">>)),
-    declare_exchange(Ch, x(<<"test">>)),
-    Q = bind_queue(Ch, <<"test">>, <<"key">>),
-    await_binding(Hare, <<"test">>, <<"key">>),
-
-    Msg = #amqp_msg{props   = #'P_basic'{user_id = <<"hare-user">>},
-                    payload = <<"HELLO">>},
-
-    SafeUri = fun (H) ->
-                      {array, [{table, Recv}]} =
-                          rabbit_misc:table_lookup(
-                            H, <<"x-received-from">>),
-                      ?assertEqual(
-                         {longstr, <<"amqp://localhost:5673">>},
-                         rabbit_misc:table_lookup(Recv, <<"uri">>))
-              end,
-    ExpectUser =
-        fun (ExpUser) ->
-                fun () ->
-                        receive
-                            {#'basic.deliver'{},
-                             #amqp_msg{props   = Props,
-                                       payload = Payload}} ->
-                                #'P_basic'{user_id = ActUser,
-                                           headers = Headers} = Props,
-                                SafeUri(Headers),
-                                ?assertEqual(<<"HELLO">>, Payload),
-                                ?assertEqual(ExpUser, ActUser)
-                        end
-                end
-        end,
-
-    publish(Ch2, <<"test">>, <<"key">>, Msg),
-    expect(Ch, Q, ExpectUser(undefined)),
-
-    set_policy_upstream(Rabbit, <<"^test$">>, <<"amqp://localhost:5673">>,
-                        [{<<"trust-user-id">>, true}]),
-
-    publish(Ch2, <<"test">>, <<"key">>, Msg),
-    expect(Ch, Q, ExpectUser(<<"hare-user">>)),
-
-    ok.
-
-%% In order to test that unbinds get sent we deliberately set up a
-%% broken config - with topic upstream and fanout downstream. You
-%% shouldn't really do this, but it lets us see "extra" messages that
-%% get sent.
-unbind_gets_transmitted_test() ->
-    with_ch(
-      fun (Ch) ->
-              Q11 = bind_queue(Ch, <<"fed.downstream">>, <<"key1">>),
-              Q12 = bind_queue(Ch, <<"fed.downstream">>, <<"key1">>),
-              Q21 = bind_queue(Ch, <<"fed.downstream">>, <<"key2">>),
-              Q22 = bind_queue(Ch, <<"fed.downstream">>, <<"key2">>),
-              await_binding(<<"upstream">>, <<"key1">>),
-              await_binding(<<"upstream">>, <<"key2">>),
-              [delete_queue(Ch, Q) || Q <- [Q12, Q21, Q22]],
-              publish(Ch, <<"upstream">>, <<"key1">>, <<"YES">>),
-              publish(Ch, <<"upstream">>, <<"key2">>, <<"NO">>),
-              expect(Ch, Q11, [<<"YES">>]),
-              expect_empty(Ch, Q11)
-      end, [x(<<"upstream">>),
-            x(<<"fed.downstream">>)]).
-
-no_loop_test() ->
-    with_ch(
-      fun (Ch) ->
-              Q1 = bind_queue(Ch, <<"one">>, <<"key">>),
-              Q2 = bind_queue(Ch, <<"two">>, <<"key">>),
-              await_binding(<<"one">>, <<"key">>, 2),
-              await_binding(<<"two">>, <<"key">>, 2),
-              publish(Ch, <<"one">>, <<"key">>, <<"Hello from one">>),
-              publish(Ch, <<"two">>, <<"key">>, <<"Hello from two">>),
-              expect(Ch, Q1, [<<"Hello from one">>, <<"Hello from two">>]),
-              expect(Ch, Q2, [<<"Hello from one">>, <<"Hello from two">>]),
-              expect_empty(Ch, Q1),
-              expect_empty(Ch, Q2)
-      end, [x(<<"one">>),
-            x(<<"two">>)]).
-
-binding_recovery_with() -> disambiguate(
-                             fun (Init) ->
-                                     rabbit_test_configs:start_nodes(Init, [a])
-                             end).
-binding_recovery([Rabbit]) ->
-    Q = <<"durable-Q">>,
-    {_, Ch} = rabbit_test_util:connect(Rabbit),
-
-    rabbit_federation_test_util:set_upstream(
-      Rabbit, <<"rabbit">>, <<"amqp://localhost:5672">>),
-    rabbit_federation_test_util:set_upstream_set(
-      Rabbit, <<"upstream">>,
-      [{<<"rabbit">>, [{<<"exchange">>, <<"upstream">>}]},
-       {<<"rabbit">>, [{<<"exchange">>, <<"upstream2">>}]}]),
-    rabbit_federation_test_util:set_policy(
-      Rabbit, <<"fed">>, <<"^fed\\.">>, <<"upstream">>),
-
-    declare_all(Ch, [x(<<"upstream2">>) | ?UPSTREAM_DOWNSTREAM]),
-    #'queue.declare_ok'{} =
-        amqp_channel:call(Ch, #'queue.declare'{queue   = Q,
-                                               durable = true}),
-    bind_queue(Ch, Q, <<"fed.downstream">>, <<"key">>),
-    timer:sleep(100), %% To get the suffix written
-
-    %% i.e. don't clean up
-    Rabbit2 = rabbit_test_configs:restart_node(Rabbit),
-
-    ?assert(none =/= suffix(Rabbit2, <<"rabbit">>, "upstream")),
-    ?assert(none =/= suffix(Rabbit2, <<"rabbit">>, "upstream2")),
-
-    %% again don't clean up
-    Rabbit3 = rabbit_test_configs:restart_node(Rabbit2),
-    {_, Ch3} = rabbit_test_util:connect(Rabbit3),
-
-    rabbit_test_util:set_param(
-      Rabbit, <<"federation-upstream-set">>, <<"upstream">>,
-      [[{<<"upstream">>, <<"rabbit">>}, {<<"exchange">>, <<"upstream">>}]]),
-
-    publish_expect(Ch3, <<"upstream">>, <<"key">>, Q, <<"HELLO">>),
-    ?assert(none =/= suffix(Rabbit3, <<"rabbit">>, "upstream")),
-    ?assertEqual(none, suffix(Rabbit3, <<"rabbit">>, "upstream2")),
-    delete_all(Ch3, [x(<<"upstream2">>) | ?UPSTREAM_DOWNSTREAM]),
-    delete_queue(Ch3, Q),
-    ok.
-
-suffix(Cfg, Name, XName) ->
-    rpc:call(pget(node, Cfg), rabbit_federation_db, get_active_suffix,
-             [r(<<"fed.downstream">>),
-              #upstream{name          = Name,
-                        exchange_name = list_to_binary(XName)}, none]).
-
-%% TODO remove
-n(Nodename) ->
-    {_, NodeHost} = rabbit_nodes:parts(node()),
-    rabbit_nodes:make({Nodename, NodeHost}).
-
-restart_upstream_with() -> disambiguate(start_ab).
-restart_upstream([Rabbit, Hare]) ->
-    {_, Downstream} = rabbit_test_util:connect(Rabbit),
-    {_, Upstream}   = rabbit_test_util:connect(Hare),
-
-    rabbit_federation_test_util:set_upstream(
-      Rabbit, <<"hare">>, <<"amqp://localhost:5673">>),
-    rabbit_federation_test_util:set_upstream_set(
-      Rabbit, <<"upstream">>,
-      [{<<"hare">>, [{<<"exchange">>, <<"upstream">>}]}]),
-    rabbit_federation_test_util:set_policy(
-      Rabbit, <<"hare">>, <<"^hare\\.">>, <<"upstream">>),
-
-    declare_exchange(Upstream, x(<<"upstream">>)),
-    declare_exchange(Downstream, x(<<"hare.downstream">>)),
-
-    Qstays = bind_queue(Downstream, <<"hare.downstream">>, <<"stays">>),
-    Qgoes = bind_queue(Downstream, <<"hare.downstream">>, <<"goes">>),
-
-    Hare2 = rabbit_test_configs:stop_node(Hare),
-
-    Qcomes = bind_queue(Downstream, <<"hare.downstream">>, <<"comes">>),
-    unbind_queue(Downstream, Qgoes, <<"hare.downstream">>, <<"goes">>),
-
-    Hare3 = rabbit_test_configs:start_node(Hare2),
-    {_, Upstream1} = rabbit_test_util:connect(Hare3),
-
-    %% Wait for the link to come up and for these bindings
-    %% to be transferred
-    await_binding(Hare, <<"upstream">>, <<"comes">>, 1),
-    await_binding_absent(Hare, <<"upstream">>, <<"goes">>),
-    await_binding(Hare, <<"upstream">>, <<"stays">>, 1),
-
-    publish(Upstream1, <<"upstream">>, <<"goes">>, <<"GOES">>),
-    publish(Upstream1, <<"upstream">>, <<"stays">>, <<"STAYS">>),
-    publish(Upstream1, <<"upstream">>, <<"comes">>, <<"COMES">>),
-
-    expect(Downstream, Qstays, [<<"STAYS">>]),
-    expect(Downstream, Qcomes, [<<"COMES">>]),
-    expect_empty(Downstream, Qgoes),
-
-    delete_exchange(Downstream, <<"hare.downstream">>),
-    delete_exchange(Upstream1, <<"upstream">>),
-    ok.
-
-%% flopsy, mopsy and cottontail, connected in a ring with max_hops = 2
-%% for each connection. We should not see any duplicates.
-
-max_hops_with() -> disambiguate(start_abc).
-max_hops([Flopsy, Mopsy, Cottontail]) ->
-    [set_policy_upstream(
-       Cfg, <<"^ring$">>,
-       list_to_binary("amqp://localhost:" ++ integer_to_list(Port)),
-       [{<<"max-hops">>, 2}])
-     || {Cfg, Port} <- [{Flopsy,     pget(port, Cottontail)},
-                        {Mopsy,      pget(port, Flopsy)},
-                        {Cottontail, pget(port, Mopsy)}]],
-
-    {_, FlopsyCh}     = rabbit_test_util:connect(Flopsy),
-    {_, MopsyCh}      = rabbit_test_util:connect(Mopsy),
-    {_, CottontailCh} = rabbit_test_util:connect(Cottontail),
-
-    declare_exchange(FlopsyCh,     x(<<"ring">>)),
-    declare_exchange(MopsyCh,      x(<<"ring">>)),
-    declare_exchange(CottontailCh, x(<<"ring">>)),
-
-    Q1 = bind_queue(FlopsyCh,     <<"ring">>, <<"key">>),
-    Q2 = bind_queue(MopsyCh,      <<"ring">>, <<"key">>),
-    Q3 = bind_queue(CottontailCh, <<"ring">>, <<"key">>),
-
-    await_binding(Flopsy,     <<"ring">>, <<"key">>, 3),
-    await_binding(Mopsy,      <<"ring">>, <<"key">>, 3),
-    await_binding(Cottontail, <<"ring">>, <<"key">>, 3),
-
-    publish(FlopsyCh,     <<"ring">>, <<"key">>, <<"HELLO flopsy">>),
-    publish(MopsyCh,      <<"ring">>, <<"key">>, <<"HELLO mopsy">>),
-    publish(CottontailCh, <<"ring">>, <<"key">>, <<"HELLO cottontail">>),
-
-    Msgs = [<<"HELLO flopsy">>, <<"HELLO mopsy">>, <<"HELLO cottontail">>],
-    expect(FlopsyCh,     Q1, Msgs),
-    expect(MopsyCh,      Q2, Msgs),
-    expect(CottontailCh, Q3, Msgs),
-    expect_empty(FlopsyCh,     Q1),
-    expect_empty(MopsyCh,      Q2),
-    expect_empty(CottontailCh, Q3),
-    ok.
-
-%% Two nodes, both federated with each other, and max_hops set to a
-%% high value. Things should not get out of hand.
-cycle_detection_with() -> disambiguate(start_ab).
-cycle_detection([Cycle1, Cycle2]) ->
-    [set_policy_upstream(
-       Cfg, <<"^cycle$">>,
-       list_to_binary("amqp://localhost:" ++ integer_to_list(Port)),
-       [{<<"max-hops">>, 10}])
-     || {Cfg, Port} <- [{Cycle1, pget(port, Cycle2)},
-                        {Cycle2, pget(port, Cycle1)}]],
-
-    {_, Cycle1Ch} = rabbit_test_util:connect(Cycle1),
-    {_, Cycle2Ch} = rabbit_test_util:connect(Cycle2),
-
-    declare_exchange(Cycle1Ch, x(<<"cycle">>)),
-    declare_exchange(Cycle2Ch, x(<<"cycle">>)),
-
-    Q1 = bind_queue(Cycle1Ch, <<"cycle">>, <<"key">>),
-    Q2 = bind_queue(Cycle2Ch, <<"cycle">>, <<"key">>),
-
-    %% "key" present twice because once for the local queue and once
-    %% for federation in each case
-    await_binding(Cycle1, <<"cycle">>, <<"key">>, 2),
-    await_binding(Cycle2, <<"cycle">>, <<"key">>, 2),
-
-    publish(Cycle1Ch, <<"cycle">>, <<"key">>, <<"HELLO1">>),
-    publish(Cycle2Ch, <<"cycle">>, <<"key">>, <<"HELLO2">>),
-
-    Msgs = [<<"HELLO1">>, <<"HELLO2">>],
-    expect(Cycle1Ch, Q1, Msgs),
-    expect(Cycle2Ch, Q2, Msgs),
-    expect_empty(Cycle1Ch, Q1),
-    expect_empty(Cycle2Ch, Q2),
-
-    ok.
-
-%% Arrows indicate message flow. Numbers indicate max_hops.
-%%
-%% Dylan ---1--> Bugs ---2--> Jessica
-%% |^                              |^
-%% |\--------------1---------------/|
-%% \---------------1----------------/
-%%
-%%
-%% We want to demonstrate that if we bind a queue locally at each
-%% broker, (exactly) the following bindings propagate:
-%%
-%% Bugs binds to Dylan
-%% Jessica binds to Bugs, which then propagates on to Dylan
-%% Jessica binds to Dylan directly
-%% Dylan binds to Jessica.
-%%
-%% i.e. Dylan has two bindings from Jessica and one from Bugs
-%%      Bugs has one binding from Jessica
-%%      Jessica has one binding from Dylan
-%%
-%% So we tag each binding with its original broker and see how far it gets
-%%
-%% Also we check that when we tear down the original bindings
-%% that we get rid of everything again.
-
-binding_propagation_with() -> disambiguate(start_abc).
-binding_propagation([Dylan, Bugs, Jessica]) ->
-    set_policy_upstream( Dylan,   <<"^x$">>, <<"amqp://localhost:5674">>, []),
-    set_policy_upstream( Bugs,    <<"^x$">>, <<"amqp://localhost:5672">>, []),
-    set_policy_upstreams(Jessica, <<"^x$">>, [{<<"amqp://localhost:5672">>, []},
-                                              {<<"amqp://localhost:5673">>,
-                                               [{<<"max-hops">>, 2}]}]),
-    {_, DylanCh}   = rabbit_test_util:connect(Dylan),
-    {_, BugsCh}    = rabbit_test_util:connect(Bugs),
-    {_, JessicaCh} = rabbit_test_util:connect(Jessica),
-
-    declare_exchange(DylanCh,   x(<<"x">>)),
-    declare_exchange(BugsCh,    x(<<"x">>)),
-    declare_exchange(JessicaCh, x(<<"x">>)),
-
-    Q1 = bind_queue(DylanCh,   <<"x">>, <<"dylan">>),
-    Q2 = bind_queue(BugsCh,    <<"x">>, <<"bugs">>),
-    Q3 = bind_queue(JessicaCh, <<"x">>, <<"jessica">>),
-
-    await_binding( Dylan,   <<"x">>, <<"jessica">>, 2),
-    await_bindings(Dylan,   <<"x">>, [<<"bugs">>, <<"dylan">>]),
-    await_bindings(Bugs,    <<"x">>, [<<"jessica">>, <<"bugs">>]),
-    await_bindings(Jessica, <<"x">>, [<<"dylan">>, <<"jessica">>]),
-
-    delete_queue(DylanCh,   Q1),
-    delete_queue(BugsCh,    Q2),
-    delete_queue(JessicaCh, Q3),
-
-    await_bindings(Dylan,   <<"x">>, []),
-    await_bindings(Bugs,    <<"x">>, []),
-    await_bindings(Jessica, <<"x">>, []),
-
-    ok.
-
-upstream_has_no_federation_with() ->
-    disambiguate(fun (Init) ->
-                         Inits = [Init, no_plugins(Init)],
-                         rabbit_test_configs:start_nodes(Inits, [a, b])
-                 end).
-upstream_has_no_federation([Rabbit, Hare]) ->
-    set_policy_upstream(Rabbit, <<"^test$">>, <<"amqp://localhost:5673">>, []),
-    {_, Downstream} = rabbit_test_util:connect(Rabbit),
-    {_, Upstream}   = rabbit_test_util:connect(Hare),
-    declare_exchange(Upstream, x(<<"test">>)),
-    declare_exchange(Downstream, x(<<"test">>)),
-    Q = bind_queue(Downstream, <<"test">>, <<"routing">>),
-    await_binding(Hare, <<"test">>, <<"routing">>),
-    publish(Upstream, <<"test">>, <<"routing">>, <<"HELLO">>),
-    expect(Downstream, Q, [<<"HELLO">>]),
-    ok.
-
-dynamic_reconfiguration_test() ->
-    Cfg = single_cfg(),
-    with_ch(
-      fun (_Ch) ->
-              Xs = [<<"all.fed1">>, <<"all.fed2">>],
-              %% Left from the conf we set up for previous tests
-              assert_connections(Xs, [<<"localhost">>, <<"local5673">>]),
-
-              %% Test that clearing connections works
-              clear_upstream(Cfg, <<"localhost">>),
-              clear_upstream(Cfg, <<"local5673">>),
-              assert_connections(Xs, []),
-
-              %% Test that readding them and changing them works
-              set_upstream(Cfg, <<"localhost">>, <<"amqp://localhost">>),
-              %% Do it twice so we at least hit the no-restart optimisation
-              set_upstream(Cfg, <<"localhost">>, <<"amqp://">>),
-              set_upstream(Cfg, <<"localhost">>, <<"amqp://">>),
-              assert_connections(Xs, [<<"localhost">>]),
-
-              %% And re-add the last - for next test
-              set_upstream(Cfg, <<"local5673">>, <<"amqp://localhost:5673">>)
-      end, [x(<<"all.fed1">>), x(<<"all.fed2">>)]).
-
-dynamic_reconfiguration_integrity_test() ->
-    Cfg = single_cfg(),
-    with_ch(
-      fun (_Ch) ->
-              Xs = [<<"new.fed1">>, <<"new.fed2">>],
-
-              %% Declared exchanges with nonexistent set - no links
-              assert_connections(Xs, []),
-
-              %% Create the set - links appear
-              set_upstream_set(Cfg, <<"new-set">>, [{<<"localhost">>, []}]),
-              assert_connections(Xs, [<<"localhost">>]),
-
-              %% Add nonexistent connections to set - nothing breaks
-              set_upstream_set(
-                Cfg, <<"new-set">>, [{<<"localhost">>, []},
-                                     {<<"does-not-exist">>, []}]),
-              assert_connections(Xs, [<<"localhost">>]),
-
-              %% Change connection in set - links change
-              set_upstream_set(Cfg, <<"new-set">>, [{<<"local5673">>, []}]),
-              assert_connections(Xs, [<<"local5673">>])
-      end, [x(<<"new.fed1">>), x(<<"new.fed2">>)]).
-
-federate_unfederate_test() ->
-    Cfg = single_cfg(),
-    with_ch(
-      fun (_Ch) ->
-              Xs = [<<"dyn.exch1">>, <<"dyn.exch2">>],
-
-              %% Declared non-federated exchanges - no links
-              assert_connections(Xs, []),
-
-              %% Federate them - links appear
-              set_policy(Cfg, <<"dyn">>, <<"^dyn\\.">>, <<"all">>),
-              assert_connections(Xs, [<<"localhost">>, <<"local5673">>]),
-
-              %% Change policy - links change
-              set_policy(Cfg, <<"dyn">>, <<"^dyn\\.">>, <<"localhost">>),
-              assert_connections(Xs, [<<"localhost">>]),
-
-              %% Unfederate them - links disappear
-              clear_policy(Cfg, <<"dyn">>),
-              assert_connections(Xs, [])
-      end, [x(<<"dyn.exch1">>), x(<<"dyn.exch2">>)]).
-
-dynamic_plugin_stop_start_test() ->
-    Cfg = single_cfg(),
-    X1 = <<"dyn.exch1">>,
-    X2 = <<"dyn.exch2">>,
-    with_ch(
-      fun (Ch) ->
-              set_policy(Cfg, <<"dyn">>, <<"^dyn\\.">>, <<"localhost">>),
-
-              %% Declare federated exchange - get link
-              assert_connections([X1], [<<"localhost">>]),
-
-              %% Disable plugin, link goes
-              ok = disable_plugin(Cfg, "rabbitmq_federation"),
-              %% We can't check with status for obvious reasons...
-              undefined = whereis(rabbit_federation_sup),
-              {error, not_found} = rabbit_registry:lookup_module(
-                                     exchange, 'x-federation-upstream'),
-
-              %% Create exchange then re-enable plugin, links appear
-              declare_exchange(Ch, x(X2)),
-              ok = enable_plugin(Cfg, "rabbitmq_federation"),
-              assert_connections([X1, X2], [<<"localhost">>]),
-              {ok, _} = rabbit_registry:lookup_module(
-                          exchange, 'x-federation-upstream'),
-
-              %% Test both exchanges work. They are just federated to
-              %% themselves so should duplicate messages.
-              [begin
-                   Q = bind_queue(Ch, X, <<"key">>),
-                   await_binding(Cfg, X, <<"key">>, 2),
-                   publish(Ch, X, <<"key">>, <<"HELLO">>),
-                   expect(Ch, Q, [<<"HELLO">>, <<"HELLO">>]),
-                   delete_queue(Ch, Q)
-               end || X <- [X1, X2]],
-
-              clear_policy(Cfg, <<"dyn">>),
-              assert_connections([X1, X2], [])
-      end, [x(X1)]).
-
-%%----------------------------------------------------------------------------
-
-with_ch(Fun, Xs) ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    declare_all(Ch, Xs),
-    rabbit_federation_test_util:assert_status(
-      Xs, {exchange, upstream_exchange}),
-    Fun(Ch),
-    delete_all(Ch, Xs),
-    amqp_connection:close(Conn),
-    cleanup(single_cfg()),
-    ok.
-
-cleanup(Cfg) ->
-    [rpc:call(pget(node, Cfg), rabbit_amqqueue, delete, [Q, false, false]) ||
-        Q <- queues(pget(node, Cfg))].
-
-queues(Node) ->
-    case rpc:call(Node, rabbit_amqqueue, list, [<<"/">>]) of
-        {badrpc, _} -> [];
-        Qs          -> Qs
-    end.
-
-stop_other_node(Node) ->
-    cleanup(Node),
-    rabbit_federation_test_util:stop_other_node(Node).
-
-declare_all(Ch, Xs) -> [declare_exchange(Ch, X) || X <- Xs].
-delete_all(Ch, Xs) ->
-    [delete_exchange(Ch, X) || #'exchange.declare'{exchange = X} <- Xs].
-
-declare_exchange(Ch, X) ->
-    amqp_channel:call(Ch, X).
-
-x(Name) -> x(Name, <<"topic">>).
-
-x(Name, Type) ->
-    #'exchange.declare'{exchange = Name,
-                        type     = Type,
-                        durable  = true}.
-
-r(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
-
-declare_queue(Ch) ->
-    #'queue.declare_ok'{queue = Q} =
-        amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
-    Q.
-
-bind_queue(Ch, Q, X, Key) ->
-    amqp_channel:call(Ch, #'queue.bind'{queue       = Q,
-                                        exchange    = X,
-                                        routing_key = Key}).
-
-unbind_queue(Ch, Q, X, Key) ->
-    amqp_channel:call(Ch, #'queue.unbind'{queue       = Q,
-                                          exchange    = X,
-                                          routing_key = Key}).
-
-bind_exchange(Ch, D, S, Key) ->
-    amqp_channel:call(Ch, #'exchange.bind'{destination = D,
-                                           source      = S,
-                                           routing_key = Key}).
-
-bind_queue(Ch, X, Key) ->
-    Q = declare_queue(Ch),
-    bind_queue(Ch, Q, X, Key),
-    Q.
-
-delete_exchange(Ch, X) ->
-    amqp_channel:call(Ch, #'exchange.delete'{exchange = X}).
-
-delete_queue(Ch, Q) ->
-    amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
-
-await_binding(X, Key)         -> await_binding(single_cfg(), X, Key, 1).
-await_binding(X, Key, Count)
-  when is_binary(X)           -> await_binding(single_cfg(), X, Key, Count);
-await_binding(Broker, X, Key) -> await_binding(Broker,       X, Key, 1).
-
-await_binding(Node, X, Key, Count) when is_atom(Node) ->
-    case bound_keys_from(Node, X, Key) of
-        L when length(L) <   Count -> timer:sleep(100),
-                                      await_binding(Node, X, Key, Count);
-        L when length(L) =:= Count -> ok;
-        L                          -> exit({too_many_bindings,
-                                            X, Key, Count, L})
-    end;
-await_binding(Cfg, X, Key, Count) ->
-     await_binding(pget(node, Cfg), X, Key, Count).
-
-await_bindings(Broker, X, Keys) ->
-    [await_binding(Broker, X, Key) || Key <- Keys].
-
-await_binding_absent(Node, X, Key) when is_atom(Node) ->
-    case bound_keys_from(Node, X, Key) of
-        [] -> ok;
-        _  -> timer:sleep(100),
-              await_binding_absent(Node, X, Key)
-    end;
-await_binding_absent(Cfg, X, Key) ->
-     await_binding_absent(pget(node, Cfg), X, Key).
-
-bound_keys_from(Node, X, Key) ->
-    [K || #binding{key = K} <-
-              rpc:call(Node, rabbit_binding, list_for_source, [r(X)]),
-          K =:= Key].
-
-publish(Ch, X, Key, Payload) when is_binary(Payload) ->
-    publish(Ch, X, Key, #amqp_msg{payload = Payload});
-
-publish(Ch, X, Key, Msg = #amqp_msg{}) ->
-    amqp_channel:call(Ch, #'basic.publish'{exchange    = X,
-                                           routing_key = Key}, Msg).
-
-publish_expect(Ch, X, Key, Q, Payload) ->
-    publish(Ch, X, Key, Payload),
-    expect(Ch, Q, [Payload]).
-
-%%----------------------------------------------------------------------------
-
-assert_connections(Xs, Conns) ->
-    Links = [{X, C, X} ||
-                X <- Xs,
-                C <- Conns],
-    Remaining = lists:foldl(
-                  fun (Link, Status) ->
-                          rabbit_federation_test_util:assert_link_status(
-                            Link, Status, {exchange, upstream_exchange})
-                  end, rabbit_federation_status:status(), Links),
-    ?assertEqual([], Remaining),
-    ok.
-
-connection_pids(Node) ->
-    [P || [{pid, P}] <-
-              rpc:call(Node, rabbit_networking, connection_info_all, [[pid]])].
diff --git a/rabbitmq-server/deps/rabbitmq_federation/test/src/rabbit_federation_test_util.erl b/rabbitmq-server/deps/rabbitmq_federation/test/src/rabbit_federation_test_util.erl
deleted file mode 100644 (file)
index 75a180b..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ Federation.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_federation_test_util).
-
--include("rabbit_federation.hrl").
--include_lib("eunit/include/eunit.hrl").
--include_lib("amqp_client/include/amqp_client.hrl").
-
--compile(export_all).
-
--import(rabbit_misc, [pget/2]).
-
-expect(Ch, Q, Fun) when is_function(Fun) ->
-    amqp_channel:subscribe(Ch, #'basic.consume'{queue  = Q,
-                                                no_ack = true}, self()),
-    receive
-        #'basic.consume_ok'{consumer_tag = CTag} -> ok
-    end,
-    Fun(),
-    amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = CTag});
-
-expect(Ch, Q, Payloads) ->
-    expect(Ch, Q, fun() -> expect(Payloads) end).
-
-expect([]) ->
-    ok;
-expect(Payloads) ->
-    receive
-        {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
-            case lists:member(Payload, Payloads) of
-                true  -> expect(Payloads -- [Payload]);
-                false -> throw({expected, Payloads, actual, Payload})
-            end
-    end.
-
-expect_empty(Ch, Q) ->
-    ?assertMatch(#'basic.get_empty'{},
-                 amqp_channel:call(Ch, #'basic.get'{ queue = Q })).
-
-set_upstream(Cfg, Name, URI) ->
-    set_upstream(Cfg, Name, URI, []).
-
-set_upstream(Cfg, Name, URI, Extra) ->
-    rabbit_test_util:set_param(Cfg, <<"federation-upstream">>, Name,
-                               [{<<"uri">>, URI} | Extra]).
-
-clear_upstream(Cfg, Name) ->
-    rabbit_test_util:clear_param(Cfg, <<"federation-upstream">>, Name).
-
-set_upstream_set(Cfg, Name, Set) ->
-    rabbit_test_util:set_param(
-      Cfg, <<"federation-upstream-set">>, Name,
-      [[{<<"upstream">>, UStream} | Extra] || {UStream, Extra} <- Set]).
-
-set_policy(Cfg, Name, Pattern, UpstreamSet) ->
-    rabbit_test_util:set_policy(Cfg, Name, Pattern, <<"all">>,
-                                [{<<"federation-upstream-set">>, UpstreamSet}]).
-
-set_policy1(Cfg, Name, Pattern, Upstream) ->
-    rabbit_test_util:set_policy(Cfg, Name, Pattern, <<"all">>,
-                                [{<<"federation-upstream">>, Upstream}]).
-
-clear_policy(Cfg, Name) ->
-    rabbit_test_util:clear_policy(Cfg, Name).
-
-set_policy_upstream(Cfg, Pattern, URI, Extra) ->
-    set_policy_upstreams(Cfg, Pattern, [{URI, Extra}]).
-
-set_policy_upstreams(Cfg, Pattern, URIExtras) ->
-    put(upstream_num, 1),
-    [set_upstream(Cfg, gen_upstream_name(), URI, Extra)
-     || {URI, Extra} <- URIExtras],
-    set_policy(Cfg, Pattern, Pattern, <<"all">>).
-
-gen_upstream_name() ->
-    list_to_binary("upstream-" ++ integer_to_list(next_upstream_num())).
-
-next_upstream_num() ->
-    R = get(upstream_num) + 1,
-    put (upstream_num, R),
-    R.
-
-%% Make sure that even though multiple nodes are in a single
-%% distributed system, we still keep all our process groups separate.
-disambiguate(Rest) ->
-    [Rest,
-     fun (Cfgs) ->
-             [rpc:call(pget(node, Cfg), application, set_env,
-                       [rabbitmq_federation, pgroup_name_cluster_id, true])
-              || Cfg <- Cfgs],
-             Cfgs
-     end].
-
-no_plugins(Cfg) ->
-    [{K, case K of
-             plugins -> none;
-             _       -> V
-         end} || {K, V} <- Cfg].
-
-%% "fake" cfg to let us use various utility functions when running
-%% in-broker tests
-single_cfg() ->
-    Nodename = list_to_atom(os:getenv("RABBITMQ_NODENAME")),
-    [{nodename, Nodename},
-     {node,     rabbit_nodes:make(Nodename)},
-     {port,     5672}].
-
-%%----------------------------------------------------------------------------
-
-assert_status(XorQs, Names) ->
-    Links = lists:append([links(XorQ) || XorQ <- XorQs]),
-    Remaining = lists:foldl(fun (Link, Status) ->
-                                    assert_link_status(Link, Status, Names)
-                            end, rabbit_federation_status:status(), Links),
-    ?assertEqual([], Remaining),
-    ok.
-
-assert_link_status({DXorQNameBin, UpstreamName, UXorQNameBin}, Status,
-                   {TypeName, UpstreamTypeName}) ->
-    {This, Rest} = lists:partition(
-                     fun(St) ->
-                             pget(upstream, St) =:= UpstreamName andalso
-                                 pget(TypeName, St) =:= DXorQNameBin andalso
-                                 pget(UpstreamTypeName, St) =:= UXorQNameBin
-                     end, Status),
-    ?assertMatch([_], This),
-    Rest.
-
-links(#'exchange.declare'{exchange = Name}) ->
-    case rabbit_policy:get(<<"federation-upstream-set">>, xr(Name)) of
-        undefined -> [];
-        Set       -> X = #exchange{name = xr(Name)},
-                     [{Name, U#upstream.name, U#upstream.exchange_name} ||
-                         U <- rabbit_federation_upstream:from_set(Set, X)]
-    end;
-links(#'queue.declare'{queue = Name}) ->
-    case rabbit_policy:get(<<"federation-upstream-set">>, qr(Name)) of
-        undefined -> [];
-        Set       -> Q = #amqqueue{name = qr(Name)},
-                     [{Name, U#upstream.name, U#upstream.queue_name} ||
-                         U <- rabbit_federation_upstream:from_set(Set, Q)]
-    end.
-
-xr(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
-qr(Name) -> rabbit_misc:r(<<"/">>, queue, Name).
similarity index 63%
rename from rabbitmq-server/deps/rabbitmq_federation/test/src/rabbit_federation_unit_test.erl
rename to rabbitmq-server/deps/rabbitmq_federation/test/unit_inbroker_SUITE.erl
index e6806230892882a19fb20260c1f8eefc020cb7b9..e172f4fdcf1a2523249137657f681285c71c3287 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_federation_unit_test).
+-module(unit_inbroker_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-include("rabbit_federation.hrl").
+
+-compile(export_all).
 
 -define(US_NAME, <<"upstream">>).
 -define(DS_NAME, <<"fed.downstream">>).
 
--include("rabbit_federation.hrl").
--include_lib("eunit/include/eunit.hrl").
--include_lib("rabbit_common/include/rabbit.hrl").
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+          serialisation,
+          scratch_space,
+          remove_credentials
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
 
 %% Test that we apply binding changes in the correct order even when
 %% they arrive out of order.
-serialisation_test() ->
+serialisation(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, serialisation1, []).
+
+serialisation1() ->
     with_exchanges(
       fun(X) ->
               [B1, B2, B3] = [b(K) || K <- [<<"1">>, <<"2">>, <<"3">>]],
@@ -37,33 +92,14 @@ serialisation_test() ->
               %% List of lists because one for each link
               Keys = rabbit_federation_exchange_link:list_routing_keys(
                        X#exchange.name),
-              ?assertEqual([[<<"1">>, <<"2">>]], Keys)
+              [[<<"1">>, <<"2">>]] =:= Keys
       end).
 
-with_exchanges(Fun) ->
-    rabbit_exchange:declare(r(?US_NAME), fanout, false, false, false, []),
-    X = rabbit_exchange:declare(r(?DS_NAME), fanout, false, false, false, []),
-    Fun(X),
-    %% Delete downstream first or it will recreate the upstream
-    rabbit_exchange:delete(r(?DS_NAME), false),
-    rabbit_exchange:delete(r(?US_NAME), false),
-    ok.
-
-add_binding(Ser, X, B) ->
-    rabbit_federation_exchange:add_binding(transaction, X, B),
-    rabbit_federation_exchange:add_binding(Ser, X, B).
-
-remove_bindings(Ser, X, Bs) ->
-    rabbit_federation_exchange:remove_bindings(transaction, X, Bs),
-    rabbit_federation_exchange:remove_bindings(Ser, X, Bs).
-
-r(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
-
-b(Key) ->
-    #binding{source = ?DS_NAME, destination = <<"whatever">>,
-             key = Key, args = []}.
+scratch_space(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, scratch_space1, []).
 
-scratch_space_test() ->
+scratch_space1() ->
     A = <<"A">>,
     B = <<"B">>,
     DB = rabbit_federation_db,
@@ -74,19 +110,16 @@ scratch_space_test() ->
               DB:prune_scratch(N, [upstream(y), upstream(z)]),
               DB:set_active_suffix(N, upstream(y), B),
               DB:set_active_suffix(N, upstream(z), A),
-              ?assertEqual(none, DB:get_active_suffix(N, upstream(x), none)),
-              ?assertEqual(B,    DB:get_active_suffix(N, upstream(y), none)),
-              ?assertEqual(A,    DB:get_active_suffix(N, upstream(z), none))
+              none = DB:get_active_suffix(N, upstream(x), none),
+              B    = DB:get_active_suffix(N, upstream(y), none),
+              A    = DB:get_active_suffix(N, upstream(z), none)
       end).
 
-upstream(UpstreamName) ->
-    #upstream{name          = atom_to_list(UpstreamName),
-              exchange_name = <<"upstream">>}.
-
-remove_credentials_test() ->
+remove_credentials(Config) ->
     Test0 = fun (In, Exp) ->
-                    Act = rabbit_federation_upstream:remove_credentials(In),
-                    ?assertEqual(Exp, Act)
+                    Act = rabbit_ct_broker_helpers:rpc(Config, 0,
+                      rabbit_federation_upstream, remove_credentials, [In]),
+                    Exp = Act
             end,
     Cat = fun (Bs) ->
                   list_to_binary(lists:append([binary_to_list(B) || B <- Bs]))
@@ -105,3 +138,30 @@ remove_credentials_test() ->
     Test(<<"amqp://">>,  <<"localhost:5672/foo">>),
     Test(<<"amqps://">>, <<"localhost:5672/%2f">>),
     ok.
+
+with_exchanges(Fun) ->
+    rabbit_exchange:declare(r(?US_NAME), fanout, false, false, false, []),
+    X = rabbit_exchange:declare(r(?DS_NAME), fanout, false, false, false, []),
+    Fun(X),
+    %% Delete downstream first or it will recreate the upstream
+    rabbit_exchange:delete(r(?DS_NAME), false),
+    rabbit_exchange:delete(r(?US_NAME), false),
+    ok.
+
+add_binding(Ser, X, B) ->
+    rabbit_federation_exchange:add_binding(transaction, X, B),
+    rabbit_federation_exchange:add_binding(Ser, X, B).
+
+remove_bindings(Ser, X, Bs) ->
+    rabbit_federation_exchange:remove_bindings(transaction, X, Bs),
+    rabbit_federation_exchange:remove_bindings(Ser, X, Bs).
+
+r(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
+
+b(Key) ->
+    #binding{source = ?DS_NAME, destination = <<"whatever">>,
+             key = Key, args = []}.
+
+upstream(UpstreamName) ->
+    #upstream{name          = atom_to_list(UpstreamName),
+              exchange_name = <<"upstream">>}.
diff --git a/rabbitmq-server/deps/rabbitmq_federation_management/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_federation_management/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index 61e00750da2e587fd36ffa2e2f5964c83ce93489..d9e57b0f500c2e7fe183fc0556bd090e1dc04bc8 100644 (file)
@@ -33,6 +33,6 @@ and see under the `./plugins` directory.
 
 ## Copyright and License
 
-(c) Pivotal Software Inc, 2007-20016.
+(c) Pivotal Software Inc, 2007-2016.
 
 See `LICENSE` for license information.
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index f3b5c910ec43cffa99f479ef4c2055522d8d78cd..f10200452a721b5357a5ba50404592104fccc258 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_federation_management,
  [{description, "RabbitMQ Federation Management"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {env, []},
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/CONTRIBUTING.md b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..45bbcbe
--- /dev/null
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/LICENSE b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/LICENSE
new file mode 100644 (file)
index 0000000..53c9573
--- /dev/null
@@ -0,0 +1,461 @@
+                          MOZILLA PUBLIC LICENSE
+                                Version 1.1
+
+                              ---------------
+
+1. Definitions.
+
+     1.0.1. "Commercial Use" means distribution or otherwise making the
+     Covered Code available to a third party.
+
+     1.1. "Contributor" means each entity that creates or contributes to
+     the creation of Modifications.
+
+     1.2. "Contributor Version" means the combination of the Original
+     Code, prior Modifications used by a Contributor, and the Modifications
+     made by that particular Contributor.
+
+     1.3. "Covered Code" means the Original Code or Modifications or the
+     combination of the Original Code and Modifications, in each case
+     including portions thereof.
+
+     1.4. "Electronic Distribution Mechanism" means a mechanism generally
+     accepted in the software development community for the electronic
+     transfer of data.
+
+     1.5. "Executable" means Covered Code in any form other than Source
+     Code.
+
+     1.6. "Initial Developer" means the individual or entity identified
+     as the Initial Developer in the Source Code notice required by Exhibit
+     A.
+
+     1.7. "Larger Work" means a work which combines Covered Code or
+     portions thereof with code not governed by the terms of this License.
+
+     1.8. "License" means this document.
+
+     1.8.1. "Licensable" means having the right to grant, to the maximum
+     extent possible, whether at the time of the initial grant or
+     subsequently acquired, any and all of the rights conveyed herein.
+
+     1.9. "Modifications" means any addition to or deletion from the
+     substance or structure of either the Original Code or any previous
+     Modifications. When Covered Code is released as a series of files, a
+     Modification is:
+          A. Any addition to or deletion from the contents of a file
+          containing Original Code or previous Modifications.
+
+          B. Any new file that contains any part of the Original Code or
+          previous Modifications.
+
+     1.10. "Original Code" means Source Code of computer software code
+     which is described in the Source Code notice required by Exhibit A as
+     Original Code, and which, at the time of its release under this
+     License is not already Covered Code governed by this License.
+
+     1.10.1. "Patent Claims" means any patent claim(s), now owned or
+     hereafter acquired, including without limitation,  method, process,
+     and apparatus claims, in any patent Licensable by grantor.
+
+     1.11. "Source Code" means the preferred form of the Covered Code for
+     making modifications to it, including all modules it contains, plus
+     any associated interface definition files, scripts used to control
+     compilation and installation of an Executable, or source code
+     differential comparisons against either the Original Code or another
+     well known, available Covered Code of the Contributor's choice. The
+     Source Code can be in a compressed or archival form, provided the
+     appropriate decompression or de-archiving software is widely available
+     for no charge.
+
+     1.12. "You" (or "Your")  means an individual or a legal entity
+     exercising rights under, and complying with all of the terms of, this
+     License or a future version of this License issued under Section 6.1.
+     For legal entities, "You" includes any entity which controls, is
+     controlled by, or is under common control with You. For purposes of
+     this definition, "control" means (a) the power, direct or indirect,
+     to cause the direction or management of such entity, whether by
+     contract or otherwise, or (b) ownership of more than fifty percent
+     (50%) of the outstanding shares or beneficial ownership of such
+     entity.
+
+2. Source Code License.
+
+     2.1. The Initial Developer Grant.
+     The Initial Developer hereby grants You a world-wide, royalty-free,
+     non-exclusive license, subject to third party intellectual property
+     claims:
+          (a)  under intellectual property rights (other than patent or
+          trademark) Licensable by Initial Developer to use, reproduce,
+          modify, display, perform, sublicense and distribute the Original
+          Code (or portions thereof) with or without Modifications, and/or
+          as part of a Larger Work; and
+
+          (b) under Patents Claims infringed by the making, using or
+          selling of Original Code, to make, have made, use, practice,
+          sell, and offer for sale, and/or otherwise dispose of the
+          Original Code (or portions thereof).
+
+          (c) the licenses granted in this Section 2.1(a) and (b) are
+          effective on the date Initial Developer first distributes
+          Original Code under the terms of this License.
+
+          (d) Notwithstanding Section 2.1(b) above, no patent license is
+          granted: 1) for code that You delete from the Original Code; 2)
+          separate from the Original Code;  or 3) for infringements caused
+          by: i) the modification of the Original Code or ii) the
+          combination of the Original Code with other software or devices.
+
+     2.2. Contributor Grant.
+     Subject to third party intellectual property claims, each Contributor
+     hereby grants You a world-wide, royalty-free, non-exclusive license
+
+          (a)  under intellectual property rights (other than patent or
+          trademark) Licensable by Contributor, to use, reproduce, modify,
+          display, perform, sublicense and distribute the Modifications
+          created by such Contributor (or portions thereof) either on an
+          unmodified basis, with other Modifications, as Covered Code
+          and/or as part of a Larger Work; and
+
+          (b) under Patent Claims infringed by the making, using, or
+          selling of  Modifications made by that Contributor either alone
+          and/or in combination with its Contributor Version (or portions
+          of such combination), to make, use, sell, offer for sale, have
+          made, and/or otherwise dispose of: 1) Modifications made by that
+          Contributor (or portions thereof); and 2) the combination of
+          Modifications made by that Contributor with its Contributor
+          Version (or portions of such combination).
+
+          (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+          effective on the date Contributor first makes Commercial Use of
+          the Covered Code.
+
+          (d)    Notwithstanding Section 2.2(b) above, no patent license is
+          granted: 1) for any code that Contributor has deleted from the
+          Contributor Version; 2)  separate from the Contributor Version;
+          3)  for infringements caused by: i) third party modifications of
+          Contributor Version or ii)  the combination of Modifications made
+          by that Contributor with other software  (except as part of the
+          Contributor Version) or other devices; or 4) under Patent Claims
+          infringed by Covered Code in the absence of Modifications made by
+          that Contributor.
+
+3. Distribution Obligations.
+
+     3.1. Application of License.
+     The Modifications which You create or to which You contribute are
+     governed by the terms of this License, including without limitation
+     Section 2.2. The Source Code version of Covered Code may be
+     distributed only under the terms of this License or a future version
+     of this License released under Section 6.1, and You must include a
+     copy of this License with every copy of the Source Code You
+     distribute. You may not offer or impose any terms on any Source Code
+     version that alters or restricts the applicable version of this
+     License or the recipients' rights hereunder. However, You may include
+     an additional document offering the additional rights described in
+     Section 3.5.
+
+     3.2. Availability of Source Code.
+     Any Modification which You create or to which You contribute must be
+     made available in Source Code form under the terms of this License
+     either on the same media as an Executable version or via an accepted
+     Electronic Distribution Mechanism to anyone to whom you made an
+     Executable version available; and if made available via Electronic
+     Distribution Mechanism, must remain available for at least twelve (12)
+     months after the date it initially became available, or at least six
+     (6) months after a subsequent version of that particular Modification
+     has been made available to such recipients. You are responsible for
+     ensuring that the Source Code version remains available even if the
+     Electronic Distribution Mechanism is maintained by a third party.
+
+     3.3. Description of Modifications.
+     You must cause all Covered Code to which You contribute to contain a
+     file documenting the changes You made to create that Covered Code and
+     the date of any change. You must include a prominent statement that
+     the Modification is derived, directly or indirectly, from Original
+     Code provided by the Initial Developer and including the name of the
+     Initial Developer in (a) the Source Code, and (b) in any notice in an
+     Executable version or related documentation in which You describe the
+     origin or ownership of the Covered Code.
+
+     3.4. Intellectual Property Matters
+          (a) Third Party Claims.
+          If Contributor has knowledge that a license under a third party's
+          intellectual property rights is required to exercise the rights
+          granted by such Contributor under Sections 2.1 or 2.2,
+          Contributor must include a text file with the Source Code
+          distribution titled "LEGAL" which describes the claim and the
+          party making the claim in sufficient detail that a recipient will
+          know whom to contact. If Contributor obtains such knowledge after
+          the Modification is made available as described in Section 3.2,
+          Contributor shall promptly modify the LEGAL file in all copies
+          Contributor makes available thereafter and shall take other steps
+          (such as notifying appropriate mailing lists or newsgroups)
+          reasonably calculated to inform those who received the Covered
+          Code that new knowledge has been obtained.
+
+          (b) Contributor APIs.
+          If Contributor's Modifications include an application programming
+          interface and Contributor has knowledge of patent licenses which
+          are reasonably necessary to implement that API, Contributor must
+          also include this information in the LEGAL file.
+
+               (c)    Representations.
+          Contributor represents that, except as disclosed pursuant to
+          Section 3.4(a) above, Contributor believes that Contributor's
+          Modifications are Contributor's original creation(s) and/or
+          Contributor has sufficient rights to grant the rights conveyed by
+          this License.
+
+     3.5. Required Notices.
+     You must duplicate the notice in Exhibit A in each file of the Source
+     Code.  If it is not possible to put such notice in a particular Source
+     Code file due to its structure, then You must include such notice in a
+     location (such as a relevant directory) where a user would be likely
+     to look for such a notice.  If You created one or more Modification(s)
+     You may add your name as a Contributor to the notice described in
+     Exhibit A.  You must also duplicate this License in any documentation
+     for the Source Code where You describe recipients' rights or ownership
+     rights relating to Covered Code.  You may choose to offer, and to
+     charge a fee for, warranty, support, indemnity or liability
+     obligations to one or more recipients of Covered Code. However, You
+     may do so only on Your own behalf, and not on behalf of the Initial
+     Developer or any Contributor. You must make it absolutely clear than
+     any such warranty, support, indemnity or liability obligation is
+     offered by You alone, and You hereby agree to indemnify the Initial
+     Developer and every Contributor for any liability incurred by the
+     Initial Developer or such Contributor as a result of warranty,
+     support, indemnity or liability terms You offer.
+
+     3.6. Distribution of Executable Versions.
+     You may distribute Covered Code in Executable form only if the
+     requirements of Section 3.1-3.5 have been met for that Covered Code,
+     and if You include a notice stating that the Source Code version of
+     the Covered Code is available under the terms of this License,
+     including a description of how and where You have fulfilled the
+     obligations of Section 3.2. The notice must be conspicuously included
+     in any notice in an Executable version, related documentation or
+     collateral in which You describe recipients' rights relating to the
+     Covered Code. You may distribute the Executable version of Covered
+     Code or ownership rights under a license of Your choice, which may
+     contain terms different from this License, provided that You are in
+     compliance with the terms of this License and that the license for the
+     Executable version does not attempt to limit or alter the recipient's
+     rights in the Source Code version from the rights set forth in this
+     License. If You distribute the Executable version under a different
+     license You must make it absolutely clear that any terms which differ
+     from this License are offered by You alone, not by the Initial
+     Developer or any Contributor. You hereby agree to indemnify the
+     Initial Developer and every Contributor for any liability incurred by
+     the Initial Developer or such Contributor as a result of any such
+     terms You offer.
+
+     3.7. Larger Works.
+     You may create a Larger Work by combining Covered Code with other code
+     not governed by the terms of this License and distribute the Larger
+     Work as a single product. In such a case, You must make sure the
+     requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+     If it is impossible for You to comply with any of the terms of this
+     License with respect to some or all of the Covered Code due to
+     statute, judicial order, or regulation then You must: (a) comply with
+     the terms of this License to the maximum extent possible; and (b)
+     describe the limitations and the code they affect. Such description
+     must be included in the LEGAL file described in Section 3.4 and must
+     be included with all distributions of the Source Code. Except to the
+     extent prohibited by statute or regulation, such description must be
+     sufficiently detailed for a recipient of ordinary skill to be able to
+     understand it.
+
+5. Application of this License.
+
+     This License applies to code to which the Initial Developer has
+     attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+     6.1. New Versions.
+     Netscape Communications Corporation ("Netscape") may publish revised
+     and/or new versions of the License from time to time. Each version
+     will be given a distinguishing version number.
+
+     6.2. Effect of New Versions.
+     Once Covered Code has been published under a particular version of the
+     License, You may always continue to use it under the terms of that
+     version. You may also choose to use such Covered Code under the terms
+     of any subsequent version of the License published by Netscape. No one
+     other than Netscape has the right to modify the terms applicable to
+     Covered Code created under this License.
+
+     6.3. Derivative Works.
+     If You create or use a modified version of this License (which you may
+     only do in order to apply it to code which is not already Covered Code
+     governed by this License), You must (a) rename Your license so that
+     the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+     "MPL", "NPL" or any confusingly similar phrase do not appear in your
+     license (except to note that your license differs from this License)
+     and (b) otherwise make it clear that Your version of the license
+     contains terms which differ from the Mozilla Public License and
+     Netscape Public License. (Filling in the name of the Initial
+     Developer, Original Code or Contributor in the notice described in
+     Exhibit A shall not of themselves be deemed to be modifications of
+     this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+     COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+     WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+     WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+     DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+     THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+     IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+     YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+     COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+     OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+     ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+     8.1.  This License and the rights granted hereunder will terminate
+     automatically if You fail to comply with terms herein and fail to cure
+     such breach within 30 days of becoming aware of the breach. All
+     sublicenses to the Covered Code which are properly granted shall
+     survive any termination of this License. Provisions which, by their
+     nature, must remain in effect beyond the termination of this License
+     shall survive.
+
+     8.2.  If You initiate litigation by asserting a patent infringement
+     claim (excluding declatory judgment actions) against Initial Developer
+     or a Contributor (the Initial Developer or Contributor against whom
+     You file such action is referred to as "Participant")  alleging that:
+
+     (a)  such Participant's Contributor Version directly or indirectly
+     infringes any patent, then any and all rights granted by such
+     Participant to You under Sections 2.1 and/or 2.2 of this License
+     shall, upon 60 days notice from Participant terminate prospectively,
+     unless if within 60 days after receipt of notice You either: (i)
+     agree in writing to pay Participant a mutually agreeable reasonable
+     royalty for Your past and future use of Modifications made by such
+     Participant, or (ii) withdraw Your litigation claim with respect to
+     the Contributor Version against such Participant.  If within 60 days
+     of notice, a reasonable royalty and payment arrangement are not
+     mutually agreed upon in writing by the parties or the litigation claim
+     is not withdrawn, the rights granted by Participant to You under
+     Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+     the 60 day notice period specified above.
+
+     (b)  any software, hardware, or device, other than such Participant's
+     Contributor Version, directly or indirectly infringes any patent, then
+     any rights granted to You by such Participant under Sections 2.1(b)
+     and 2.2(b) are revoked effective as of the date You first made, used,
+     sold, distributed, or had made, Modifications made by that
+     Participant.
+
+     8.3.  If You assert a patent infringement claim against Participant
+     alleging that such Participant's Contributor Version directly or
+     indirectly infringes any patent where such claim is resolved (such as
+     by license or settlement) prior to the initiation of patent
+     infringement litigation, then the reasonable value of the licenses
+     granted by such Participant under Sections 2.1 or 2.2 shall be taken
+     into account in determining the amount or value of any payment or
+     license.
+
+     8.4.  In the event of termination under Sections 8.1 or 8.2 above,
+     all end user license agreements (excluding distributors and resellers)
+     which have been validly granted by You or any distributor hereunder
+     prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+     (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+     DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+     OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+     ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+     CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+     WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+     COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+     INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+     LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+     RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+     PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+     EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+     THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+     The Covered Code is a "commercial item," as that term is defined in
+     48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+     software" and "commercial computer software documentation," as such
+     terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+     C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+     all U.S. Government End Users acquire Covered Code with only those
+     rights set forth herein.
+
+11. MISCELLANEOUS.
+
+     This License represents the complete agreement concerning subject
+     matter hereof. If any provision of this License is held to be
+     unenforceable, such provision shall be reformed only to the extent
+     necessary to make it enforceable. This License shall be governed by
+     California law provisions (except to the extent applicable law, if
+     any, provides otherwise), excluding its conflict-of-law provisions.
+     With respect to disputes in which at least one party is a citizen of,
+     or an entity chartered or registered to do business in the United
+     States of America, any litigation relating to this License shall be
+     subject to the jurisdiction of the Federal Courts of the Northern
+     District of California, with venue lying in Santa Clara County,
+     California, with the losing party responsible for costs, including
+     without limitation, court costs and reasonable attorneys' fees and
+     expenses. The application of the United Nations Convention on
+     Contracts for the International Sale of Goods is expressly excluded.
+     Any law or regulation which provides that the language of a contract
+     shall be construed against the drafter shall not apply to this
+     License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+     As between Initial Developer and the Contributors, each party is
+     responsible for claims and damages arising, directly or indirectly,
+     out of its utilization of rights under this License and You agree to
+     work with Initial Developer and Contributors to distribute such
+     responsibility on an equitable basis. Nothing herein is intended or
+     shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+     Initial Developer may designate portions of the Covered Code as
+     "Multiple-Licensed".  "Multiple-Licensed" means that the Initial
+     Developer permits you to utilize portions of the Covered Code under
+     Your choice of the NPL or the alternative licenses, if any, specified
+     by the Initial Developer in the file described in Exhibit A.
+
+EXHIBIT A -Mozilla Public License.
+
+     ``The contents of this file are subject to the Mozilla Public License
+     Version 1.1 (the "License"); you may not use this file except in
+     compliance with the License. You may obtain a copy of the License at
+     http://www.mozilla.org/MPL/
+
+     Software distributed under the License is distributed on an "AS IS"
+     basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+     License for the specific language governing rights and limitations
+     under the License.
+
+     The Original Code is rabbitmq-jms-topic-exchange.
+
+     The Initial Developer of the Original Code is Pivotal Software, Inc.
+
+     All Rights Reserved.
+
+     Contributor(s): ______________________________________.''
+
+     [NOTE: The text of this Exhibit A may differ slightly from the text of
+     the notices in the Source Code files of the Original Code. You should
+     use the text of this Exhibit A rather than the text found in the
+     Original Code Source Code for Your Modifications.]
+
+
+
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/LICENSES.txt b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/LICENSES.txt
new file mode 100644 (file)
index 0000000..43fc4b6
--- /dev/null
@@ -0,0 +1,865 @@
+open_source_license.txt\r
+\r
+JMS Client for Pivotal RabbitMQ 1.4.6 GA\r
+\r
+===========================================================================\r
+\r
+The following copyright statements and licenses apply to various open\r
+source software packages (or portions thereof) that are distributed with\r
+this Pivotal Software, Inc. Product.\r
+\r
+The Pivotal Product may also include other Pivotal components, which may\r
+contain additional open source software packages. One or more such\r
+open_source_licenses.txt files may therefore accompany this Pivotal\r
+Product.\r
+\r
+The Pivotal Product that includes this file does not necessarily use all\r
+the open source software packages referred to below and may also only\r
+use portions of a given package.\r
+\r
+\r
+=============== TABLE OF CONTENTS =============================\r
+\r
+\r
+The following is a listing of the open source components detailed in\r
+this document. This list is provided for your convenience; please read\r
+further if you wish to review the copyright notice(s) and the full text\r
+of the license associated with each component.\r
+\r
+\r
+\r
+\r
+SECTION 1: BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES\r
+\r
+   >>> slf4j-api-1.7.5\r
+\r
+\r
+\r
+SECTION 2: Apache License, V2.0\r
+\r
+   >>> geronimo-jms_1.1_spec-1.1.1\r
+\r
+\r
+\r
+SECTION 3: Mozilla Public License, V1.1\r
+\r
+   >>> amqp-client-3.5.6\r
+\r
+\r
+\r
+APPENDIX. Standard License Files\r
+\r
+   >>> Apache License, V2.0\r
+\r
+   >>> Mozilla Public License, V1.1\r
+\r
+\r
+\r
+--------------- SECTION 1:  BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES ----------\r
+\r
+BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES are applicable to the following component(s).\r
+\r
+\r
+>>> slf4j-api-1.7.5\r
+\r
+Copyright (c) 2004-2011 QOS.ch\r
+All rights reserved.\r
+\r
+Permission is hereby granted, free  of charge, to any person obtaining\r
+a  copy  of this  software  and  associated  documentation files  (the\r
+"Software"), to  deal in  the Software without  restriction, including\r
+without limitation  the rights to  use, copy, modify,  merge, publish,\r
+distribute,  sublicense, and/or sell  copies of  the Software,  and to\r
+permit persons to whom the Software  is furnished to do so, subject to\r
+the following conditions:\r
+\r
+The  above  copyright  notice  and  this permission  notice  shall  be\r
+included in all copies or substantial portions of the Software.\r
+\r
+THE  SOFTWARE IS  PROVIDED  "AS  IS", WITHOUT  WARRANTY  OF ANY  KIND,\r
+EXPRESS OR  IMPLIED, INCLUDING  BUT NOT LIMITED  TO THE  WARRANTIES OF\r
+MERCHANTABILITY,    FITNESS    FOR    A   PARTICULAR    PURPOSE    AND\r
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\r
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\r
+OF CONTRACT, TORT OR OTHERWISE,  ARISING FROM, OUT OF OR IN CONNECTION\r
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+\r
+\r
+--------------- SECTION 2: Apache License, V2.0 ----------\r
+\r
+Apache License, V2.0 is applicable to the following component(s).\r
+\r
+\r
+>>> geronimo-jms_1.1_spec-1.1.1\r
+\r
+Apache Geronimo \r
+Copyright 2003-2008 The Apache Software Foundation\r
+\r
+This product includes software developed by\r
+The Apache Software Foundation (http://www.apache.org/).\r
+\r
+\r
+--------------- SECTION 3: Mozilla Public License, V1.1 ----------\r
+\r
+Mozilla Public License, V1.1 is applicable to the following component(s).\r
+\r
+\r
+>>> amqp-client-3.5.6\r
+\r
+//  The contents of this file are subject to the Mozilla Public License\r
+//  Version 1.1 (the "License"); you may not use this file except in\r
+//  compliance with the License. You may obtain a copy of the License\r
+//  at http://www.mozilla.org/MPL/\r
+//\r
+//  Software distributed under the License is distributed on an "AS IS"\r
+//  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See\r
+//  the License for the specific language governing rights and\r
+//  limitations under the License.\r
+//\r
+//  The Original Code is RabbitMQ.\r
+//\r
+//  The Initial Developer of the Original Code is GoPivotal, Inc.\r
+//  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.\r
+\r
+ADDITIONAL LICENSE INFORMATION:\r
+\r
+> Apache 2.0\r
+\r
+amqp-client-3.5.6-sources.jar\com\rabbitmq\tools\json\JSONWriter.java\r
+\r
+/*\r
+   Copyright (c) 2006-2007 Frank Carver\r
+   Copyright (c) 2007-2015 Pivotal Software, Inc. All Rights Reserved\r
+\r
+   Licensed under the Apache License, Version 2.0 (the "License");\r
+   you may not use this file except in compliance with the License.\r
+   You may obtain a copy of the License at\r
+\r
+       http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+   Unless required by applicable law or agreed to in writing, software\r
+   distributed under the License is distributed on an "AS IS" BASIS,\r
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+   See the License for the specific language governing permissions and\r
+   limitations under the License.\r
+\r
+\r
+\r
+> Public Domain\r
+\r
+amqp-client-3.5.6-sources.jar\com\rabbitmq\client\impl\VariableLinkedBlockingQueue.java\r
+\r
+/*\r
+ * Modifications Copyright 2015 Pivotal Software, Inc and licenced as per\r
+ * the rest of the RabbitMQ Java client.\r
+ */\r
+* Written by Doug Lea with assistance from members of JCP JSR-166\r
+ * Expert Group and released to the public domain, as explained at\r
+ * http://creativecommons.org/licenses/publicdomain\r
+ */\r
+\r
+\r
+=============== APPENDIX. Standard License Files ============== \r
+\r
+\r
+\r
+--------------- SECTION 1: Apache License, V2.0 -----------\r
+\r
+Apache License \r
+\r
+Version 2.0, January 2004 \r
+http://www.apache.org/licenses/ \r
+\r
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION \r
+\r
+1. Definitions.\r
+\r
+"License" shall mean the terms and conditions for use, reproduction,\r
+and distribution as defined by Sections 1 through 9 of this document.\r
+\r
+"Licensor" shall mean the copyright owner or entity authorized by the\r
+copyright owner that is granting the License.  \r
+\r
+"Legal Entity" shall mean the union of the acting entity and all other\r
+entities that control, are controlled by, or are under common control\r
+with that entity. For the purposes of this definition, "control" means\r
+(i) the power, direct or indirect, to cause the direction or management\r
+of such entity, whether by contract or otherwise, or (ii) ownership\r
+of fifty percent (50%) or more of the outstanding shares, or (iii)\r
+beneficial ownership of such entity.\r
+\r
+"You" (or "Your") shall mean an individual or Legal Entity exercising\r
+permissions granted by this License.  \r
+\r
+"Source" form shall mean the preferred form for making modifications,\r
+including but not limited to software source code, documentation source,\r
+and configuration files.\r
+\r
+"Object" form shall mean any form resulting from mechanical transformation\r
+or translation of a Source form, including but not limited to compiled\r
+object code, generated documentation, and conversions to other media\r
+types.  \r
+\r
+"Work" shall mean the work of authorship, whether in Source or\r
+Object form, made available under the License, as indicated by a copyright\r
+notice that is included in or attached to the work (an example is provided\r
+in the Appendix below).  \r
+\r
+"Derivative Works" shall mean any work, whether in Source or Object form,\r
+that is based on (or derived from) the Work and for which the editorial\r
+revisions, annotations, elaborations, or other modifications represent,\r
+as a whole, an original work of authorship. For the purposes of this\r
+License, Derivative Works shall not include works that remain separable\r
+from, or merely link (or bind by name) to the interfaces of, the Work\r
+and Derivative Works thereof.\r
+\r
+"Contribution" shall mean any work of authorship, including the\r
+original version of the Work and any modifications or additions to\r
+that Work or Derivative Works thereof, that is intentionally submitted\r
+to Licensor for inclusion in the Work by the copyright owner or by an\r
+individual or Legal Entity authorized to submit on behalf of the copyright\r
+owner. For the purposes of this definition, "submitted" means any form of\r
+electronic, verbal, or written communication sent to the Licensor or its\r
+representatives, including but not limited to communication on electronic\r
+mailing lists, source code control systems, and issue tracking systems\r
+that are managed by, or on behalf of, the Licensor for the purpose of\r
+discussing and improving the Work, but excluding communication that is\r
+conspicuously marked or otherwise designated in writing by the copyright\r
+owner as "Not a Contribution."\r
+\r
+"Contributor" shall mean Licensor and any individual or Legal Entity\r
+on behalf of whom a Contribution has been received by Licensor and\r
+subsequently incorporated within the Work.\r
+\r
+2. Grant of Copyright License.\r
+Subject to the terms and conditions of this License, each Contributor\r
+hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,\r
+royalty-free, irrevocable copyright license to reproduce, prepare\r
+Derivative Works of, publicly display, publicly perform, sublicense, and\r
+distribute the Work and such Derivative Works in Source or Object form.\r
+\r
+3. Grant of Patent License.\r
+Subject to the terms and conditions of this License, each Contributor\r
+hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,\r
+royalty- free, irrevocable (except as stated in this section) patent\r
+license to make, have made, use, offer to sell, sell, import, and\r
+otherwise transfer the Work, where such license applies only to those\r
+patent claims licensable by such Contributor that are necessarily\r
+infringed by their Contribution(s) alone or by combination of\r
+their Contribution(s) with the Work to which such Contribution(s)\r
+was submitted. If You institute patent litigation against any entity\r
+(including a cross-claim or counterclaim in a lawsuit) alleging that the\r
+Work or a Contribution incorporated within the Work constitutes direct\r
+or contributory patent infringement, then any patent licenses granted\r
+to You under this License for that Work shall terminate as of the date\r
+such litigation is filed.\r
+\r
+4. Redistribution.\r
+You may reproduce and distribute copies of the Work or Derivative Works\r
+thereof in any medium, with or without modifications, and in Source or\r
+Object form, provided that You meet the following conditions:\r
+\r
+  a. You must give any other recipients of the Work or Derivative Works\r
+     a copy of this License; and\r
+\r
+  b. You must cause any modified files to carry prominent notices stating\r
+     that You changed the files; and\r
+\r
+  c. You must retain, in the Source form of any Derivative Works that\r
+     You distribute, all copyright, patent, trademark, and attribution\r
+     notices from the Source form of the Work, excluding those notices\r
+     that do not pertain to any part of the Derivative Works; and\r
+\r
+  d. If the Work includes a "NOTICE" text file as part of its\r
+     distribution, then any Derivative Works that You distribute must\r
+     include a readable copy of the attribution notices contained\r
+     within such NOTICE file, excluding those notices that do not\r
+     pertain to any part of the Derivative Works, in at least one of\r
+     the following places: within a NOTICE text file distributed as part\r
+     of the Derivative Works; within the Source form or documentation,\r
+     if provided along with the Derivative Works; or, within a display\r
+     generated by the Derivative Works, if and wherever such third-party\r
+     notices normally appear. The contents of the NOTICE file are for\r
+     informational purposes only and do not modify the License. You\r
+     may add Your own attribution notices within Derivative Works that\r
+     You distribute, alongside or as an addendum to the NOTICE text\r
+     from the Work, provided that such additional attribution notices\r
+     cannot be construed as modifying the License.  You may add Your own\r
+     copyright statement to Your modifications and may provide additional\r
+     or different license terms and conditions for use, reproduction, or\r
+     distribution of Your modifications, or for any such Derivative Works\r
+     as a whole, provided Your use, reproduction, and distribution of the\r
+     Work otherwise complies with the conditions stated in this License.\r
+\r
+5. Submission of Contributions.\r
+Unless You explicitly state otherwise, any Contribution intentionally\r
+submitted for inclusion in the Work by You to the Licensor shall be\r
+under the terms and conditions of this License, without any additional\r
+terms or conditions.  Notwithstanding the above, nothing herein shall\r
+supersede or modify the terms of any separate license agreement you may\r
+have executed with Licensor regarding such Contributions.\r
+\r
+6. Trademarks.\r
+This License does not grant permission to use the trade names, trademarks,\r
+service marks, or product names of the Licensor, except as required for\r
+reasonable and customary use in describing the origin of the Work and\r
+reproducing the content of the NOTICE file.\r
+\r
+7. Disclaimer of Warranty.\r
+Unless required by applicable law or agreed to in writing, Licensor\r
+provides the Work (and each Contributor provides its Contributions) on\r
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\r
+express or implied, including, without limitation, any warranties or\r
+conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR\r
+A PARTICULAR PURPOSE. You are solely responsible for determining the\r
+appropriateness of using or redistributing the Work and assume any risks\r
+associated with Your exercise of permissions under this License.\r
+\r
+8. Limitation of Liability.\r
+In no event and under no legal theory, whether in tort (including\r
+negligence), contract, or otherwise, unless required by applicable law\r
+(such as deliberate and grossly negligent acts) or agreed to in writing,\r
+shall any Contributor be liable to You for damages, including any direct,\r
+indirect, special, incidental, or consequential damages of any character\r
+arising as a result of this License or out of the use or inability to\r
+use the Work (including but not limited to damages for loss of goodwill,\r
+work stoppage, computer failure or malfunction, or any and all other\r
+commercial damages or losses), even if such Contributor has been advised\r
+of the possibility of such damages.\r
+\r
+9. Accepting Warranty or Additional Liability.\r
+While redistributing the Work or Derivative Works thereof, You may\r
+choose to offer, and charge a fee for, acceptance of support, warranty,\r
+indemnity, or other liability obligations and/or rights consistent with\r
+this License. However, in accepting such obligations, You may act only\r
+on Your own behalf and on Your sole responsibility, not on behalf of\r
+any other Contributor, and only if You agree to indemnify, defend, and\r
+hold each Contributor harmless for any liability incurred by, or claims\r
+asserted against, such Contributor by reason of your accepting any such\r
+warranty or additional liability.\r
+\r
+END OF TERMS AND CONDITIONS \r
+\r
+\r
+\r
+--------------- SECTION 2: Mozilla Public License, V1.1 -----------\r
+\r
+Mozilla Public License 1.1 (MPL 1.1)\r
+\r
+1. Definitions.\r
+\r
+      1.0.1. "Commercial Use" means distribution or otherwise making the\r
+         Covered Code available to a third party.\r
+\r
+   1.1. "Contributor" means each entity that creates or contributes to\r
+      the creation of Modifications.\r
+\r
+   1.2. "Contributor Version" means the combination of the Original Code,\r
+      prior Modifications used by a Contributor, and the Modifications\r
+      made by that particular Contributor.\r
+\r
+   1.3. "Covered Code" means the Original Code or Modifications or the\r
+      combination of the Original Code and Modifications, in each case\r
+      including portions thereof.\r
+\r
+   1.4. "Electronic Distribution Mechanism" means a mechanism generally\r
+      accepted in the software development community for the electronic\r
+      transfer of data.\r
+\r
+   1.5. "Executable" means Covered Code in any form other than Source\r
+      Code.\r
+\r
+   1.6. "Initial Developer" means the individual or entity identified\r
+      as the Initial Developer in the Source Code notice required by\r
+      Exhibit A.\r
+\r
+   1.7. "Larger Work" means a work which combines Covered Code or portions\r
+      thereof with code not governed by the terms of this License.\r
+\r
+   1.8. "License" means this document.\r
+\r
+      1.8.1. "Licensable" means having the right to grant, to the maximum\r
+         extent possible, whether at the time of the initial grant or\r
+         subsequently acquired, any and all of the rights conveyed herein.\r
+\r
+   1.9. "Modifications" means any addition to or deletion from the\r
+      substance or structure of either the Original Code or any previous\r
+      Modifications. When Covered Code is released as a series of files,\r
+      a Modification is:\r
+\r
+         A. Any addition to or deletion from the contents of a file\r
+            containing Original Code or previous Modifications.\r
+\r
+         B. Any new file that contains any part of the Original Code or\r
+            previous Modifications.\r
+\r
+   1.10. "Original Code" means Source Code of computer software code\r
+      which is described in the Source Code notice required by Exhibit\r
+      A as Original Code, and which, at the time of its release under\r
+      this License is not already Covered Code governed by this License.\r
+\r
+      1.10.1. "Patent Claims" means any patent claim(s), now owned or\r
+         hereafter acquired, including without limitation,  method,\r
+         process, and apparatus claims, in any patent Licensable by\r
+         grantor.\r
+\r
+   1.11. "Source Code" means the preferred form of the Covered Code for\r
+      making modifications to it, including all modules it contains,\r
+      plus any associated interface definition files, scripts used to\r
+      control compilation and installation of an Executable, or source\r
+      code differential comparisons against either the Original Code or\r
+      another well known, available Covered Code of the Contributor's\r
+      choice. The Source Code can be in a compressed or archival form,\r
+      provided the appropriate decompression or de-archiving software\r
+      is widely available for no charge.\r
+\r
+   1.12. "You" (or "Your")  means an individual or a legal entity exercising\r
+      rights under, and complying with all of the terms of, this License\r
+      or a future version of this License issued under Section 6.1.\r
+      For legal entities, "You" includes any entity which controls, is\r
+      controlled by, or is under common control with You. For purposes\r
+      of this definition, "control" means (a) the power, direct or\r
+      indirect, to cause the direction or management of such entity,\r
+      whether by contract or otherwise, or (b) ownership of more than\r
+      fifty percent (50%) of the outstanding shares or beneficial\r
+      ownership of such entity.\r
+\r
+2. Source Code License.\r
+\r
+   2.1. The Initial Developer Grant.\r
+      The Initial Developer hereby grants You a world-wide, royalty-free,\r
+      non-exclusive license, subject to third party intellectual property\r
+      claims:\r
+\r
+      (a) under intellectual property rights (other than patent or\r
+          trademark) Licensable by Initial Developer to use, reproduce,\r
+          modify, display, perform, sublicense and distribute the Original\r
+          Code (or portions thereof) with or without Modifications,\r
+          and/or as part of a Larger Work; and\r
+\r
+      (b) under Patents Claims infringed by the making, using or selling\r
+          of Original Code, to make, have made, use, practice, sell, and\r
+          offer for sale, and/or otherwise dispose of the Original Code\r
+          (or portions thereof).\r
+\r
+      (c) the licenses granted in this Section 2.1(a) and (b) are\r
+          effective on the date Initial Developer first distributes\r
+          Original Code under the terms of this License.\r
+\r
+      (d) Notwithstanding Section 2.1(b) above, no patent license is\r
+          granted: 1) for code that You delete from the Original Code;\r
+          2) separate from the Original Code;  or 3) for infringements\r
+          caused by: i) the modification of the Original Code or ii) the\r
+          combination of the Original Code with other software or devices.\r
+\r
+   2.2. Contributor Grant.\r
+\r
+      Subject to third party intellectual property claims, each Contributor\r
+      hereby grants You a world-wide, royalty-free, non-exclusive license\r
+\r
+      (a) under intellectual property rights (other than patent or\r
+          trademark) Licensable by Contributor, to use, reproduce, modify,\r
+          display, perform, sublicense and distribute the Modifications\r
+          created by such Contributor (or portions thereof) either on\r
+          an unmodified basis, with other Modifications, as Covered Code\r
+          and/or as part of a Larger Work; and\r
+\r
+      (b) under Patent Claims infringed by the making, using, or selling\r
+          of  Modifications made by that Contributor either alone and/or\r
+          in combination with its Contributor Version (or portions of such\r
+          combination), to make, use, sell, offer for sale, have made,\r
+          and/or otherwise dispose of: 1) Modifications made by that\r
+          Contributor (or portions thereof); and 2) the combination of\r
+          Modifications made by that Contributor with its Contributor\r
+          Version (or portions of such combination).\r
+\r
+      (c) the licenses granted in Sections 2.2(a) and 2.2(b) are effective\r
+          on the date Contributor first makes Commercial Use of the\r
+          Covered Code.\r
+\r
+      (d) Notwithstanding Section 2.2(b) above, no patent license is\r
+          granted: 1) for any code that Contributor has deleted from\r
+          the Contributor Version; 2) separate from the Contributor\r
+          Version; 3) for infringements caused by: i) third party\r
+          modifications of Contributor Version or ii)  the combination\r
+          of Modifications made by that Contributor with other software\r
+          (except as part of the Contributor Version) or other devices;\r
+          or 4) under Patent Claims infringed by Covered Code in the\r
+          absence of Modifications made by that Contributor.\r
+\r
+3. Distribution Obligations.\r
+\r
+   3.1. Application of License.\r
+\r
+      The Modifications which You create or to which You contribute\r
+      are governed by the terms of this License, including without\r
+      limitation Section 2.2.  The Source Code version of Covered Code\r
+      may be distributed only under the terms of this License or a future\r
+      version of this License released under Section 6.1, and You must\r
+      include a copy of this License with every copy of the Source Code\r
+      You distribute. You may not offer or impose any terms on any Source\r
+      Code version that alters or restricts the applicable version of\r
+      this License or the recipients' rights hereunder. However, You\r
+      may include an additional document offering the additional rights\r
+      described in Section 3.5.\r
+\r
+   3.2. Availability of Source Code.\r
+\r
+      Any Modification which You create or to which You contribute must\r
+      be made available in Source Code form under the terms of this\r
+      License either on the same media as an Executable version or via\r
+      an accepted Electronic Distribution Mechanism to anyone to whom\r
+      you made an Executable version available; and if made available\r
+      via Electronic Distribution Mechanism, must remain available for\r
+      at least twelve (12) months after the date it initially became\r
+      available, or at least six (6) months after a subsequent version\r
+      of that particular Modification has been made available to such\r
+      recipients. You are responsible for ensuring that the Source Code\r
+      version remains available even if the Electronic Distribution\r
+      Mechanism is maintained by a third party.\r
+\r
+   3.3. Description of Modifications.\r
+\r
+      You must cause all Covered Code to which You contribute to contain\r
+      a file documenting the changes You made to create that Covered\r
+      Code and the date of any change. You must include a prominent\r
+      statement that the Modification is derived, directly or indirectly,\r
+      from Original Code provided by the Initial Developer and including\r
+      the name of the Initial Developer in (a) the Source Code, and (b)\r
+      in any notice in an Executable version or related documentation\r
+      in which You describe the origin or ownership of the Covered Code.\r
+\r
+   3.4. Intellectual Property Matters\r
+\r
+      (a) Third Party Claims.\r
+\r
+          If Contributor has knowledge that a license under a third\r
+          party's intellectual property rights is required to exercise\r
+          the rights granted by such Contributor under Sections 2.1 or\r
+          2.2, Contributor must include a text file with the Source Code\r
+          distribution titled "LEGAL" which describes the claim and the\r
+          party making the claim in sufficient detail that a recipient\r
+          will know whom to contact. If Contributor obtains such knowledge\r
+          after the Modification is made available as described in Section\r
+          3.2, Contributor shall promptly modify the LEGAL file in all\r
+          copies Contributor makes available thereafter and shall take\r
+          other steps (such as notifying appropriate mailing lists or\r
+          newsgroups) reasonably calculated to inform those who received\r
+          the Covered Code that new knowledge has been obtained.\r
+\r
+      (b) Contributor APIs.\r
+\r
+          If Contributor's Modifications include an application\r
+          programming interface and Contributor has knowledge of patent\r
+          licenses which are reasonably necessary to implement that\r
+          API, Contributor must also include this information in the\r
+          LEGAL file.\r
+\r
+      (c)   Representations.\r
+\r
+          Contributor represents that, except as disclosed pursuant to\r
+          Section 3.4(a) above, Contributor believes that Contributor's\r
+          Modifications are Contributor's original creation(s) and/or\r
+          Contributor has sufficient rights to grant the rights conveyed\r
+          by this License.\r
+\r
+   3.5. Required Notices.\r
+\r
+      You must duplicate the notice in Exhibit A in each file of the\r
+      Source Code.  If it is not possible to put such notice in a\r
+      particular Source Code file due to its structure, then You must\r
+      include such notice in a location (such as a relevant directory)\r
+      where a user would be likely to look for such a notice.  If You\r
+      created one or more Modification(s) You may add your name as a\r
+      Contributor to the notice described in Exhibit A.  You must also\r
+      duplicate this License in any documentation for the Source Code\r
+      where You describe recipients' rights or ownership rights relating\r
+      to Covered Code.  You may choose to offer, and to charge a fee for,\r
+      warranty, support, indemnity or liability obligations to one or\r
+      more recipients of Covered Code. However, You may do so only on\r
+      Your own behalf, and not on behalf of the Initial Developer or\r
+      any Contributor.\r
+\r
+      You must make it absolutely clear than any such warranty, support,\r
+      indemnity or liability obligation is offered by You alone, and\r
+      You hereby agree to indemnify the Initial Developer and every\r
+      Contributor for any liability incurred by the Initial Developer\r
+      or such Contributor as a result of warranty, support, indemnity\r
+      or liability terms You offer.\r
+\r
+   3.6. Distribution of Executable Versions.\r
+\r
+      You may distribute Covered Code in Executable form only if the\r
+      requirements of Section 3.1-3.5 have been met for that Covered Code,\r
+      and if You include a notice stating that the Source Code version\r
+      of the Covered Code is available under the terms of this License,\r
+      including a description of how and where You have fulfilled the\r
+      obligations of Section 3.2.  The notice must be conspicuously\r
+      included in any notice in an Executable version, related\r
+      documentation or collateral in which You describe recipients'\r
+      rights relating to the Covered Code. You may distribute the\r
+      Executable version of Covered Code or ownership rights under a\r
+      license of Your choice, which may contain terms different from\r
+      this License, provided that You are in compliance with the terms\r
+      of this License and that the license for the Executable version\r
+      does not attempt to limit or alter the recipient's rights in the\r
+      Source Code version from the rights set forth in this License.\r
+      If You distribute the Executable version under a different license\r
+      You must make it absolutely clear that any terms which differ\r
+      from this License are offered by You alone, not by the Initial\r
+      Developer or any Contributor.  You hereby agree to indemnify the\r
+      Initial Developer and every Contributor for any liability incurred\r
+      by the Initial Developer or such Contributor as a result of any\r
+      such terms You offer.\r
+\r
+   3.7. Larger Works.\r
+\r
+      You may create a Larger Work by combining Covered Code with other\r
+      code not governed by the terms of this License and distribute the\r
+      Larger Work as a single product. In such a case, You must make sure\r
+      the requirements of this License are fulfilled for the Covered Code.\r
+\r
+4. Inability to Comply Due to Statute or Regulation.\r
+\r
+   If it is impossible for You to comply with any of the terms of this\r
+   License with respect to some or all of the Covered Code due to statute,\r
+   judicial order, or regulation then You must: (a) comply with the terms\r
+   of this License to the maximum extent possible; and (b) describe the\r
+   limitations and the code they affect. Such description must be included\r
+   in the LEGAL file described in Section 3.4 and must be included with\r
+   all distributions of the Source Code. Except to the extent prohibited\r
+   by statute or regulation, such description must be sufficiently\r
+   detailed for a recipient of ordinary skill to be able to understand it.\r
+\r
+5. Application of this License.\r
+\r
+   This License applies to code to which the Initial Developer has\r
+   attached the notice in Exhibit A and to related Covered Code.\r
+\r
+6. Versions of the License.\r
+\r
+   6.1. New Versions.\r
+\r
+      Netscape Communications Corporation ("Netscape") may publish\r
+      revised and/or new versions of the License from time to time. Each\r
+      version will be given a distinguishing version number.\r
+\r
+   6.2. Effect of New Versions.\r
+\r
+      Once Covered Code has been published under a particular version of\r
+      the License, You may always continue to use it under the terms of\r
+      that version. You may also choose to use such Covered Code under\r
+      the terms of any subsequent version of the License published by\r
+      Netscape. No one other than Netscape has the right to modify the\r
+      terms applicable to Covered Code created under this License.\r
+\r
+   6.3. Derivative Works.\r
+\r
+      If You create or use a modified version of this License (which\r
+      you may only do in order to apply it to code which is not already\r
+      Covered Code governed by this License), You must (a) rename Your\r
+      license so that the phrases "Mozilla", "MOZILLAPL", "MOZPL",\r
+      "Netscape", "MPL", "NPL" or any confusingly similar phrase\r
+      do not appear in your license (except to note that your license\r
+      differs from this License) and (b) otherwise make it clear that\r
+      Your version of the license contains terms which differ from the\r
+      Mozilla Public License and Netscape Public License. (Filling in\r
+      the name of the Initial Developer, Original Code or Contributor\r
+      in the notice described in Exhibit A shall not of themselves be\r
+      deemed to be modifications of this License.)\r
+\r
+7. DISCLAIMER OF WARRANTY.\r
+\r
+   COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS"\r
+   BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,\r
+   INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE\r
+   IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR\r
+   NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE\r
+   OF THE COVERED CODE IS WITH YOU. SHOULD ANY COVERED CODE PROVE\r
+   DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER\r
+   CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR\r
+   CORRECTION.  THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART\r
+   OF THIS LICENSE.  NO USE OF ANY COVERED CODE IS AUTHORIZED HEREUNDER\r
+   EXCEPT UNDER THIS DISCLAIMER.\r
+\r
+8. TERMINATION.\r
+\r
+   8.1.  This License and the rights granted hereunder will terminate\r
+      automatically if You fail to comply with terms herein and fail to\r
+      cure such breach within 30 days of becoming aware of the breach. All\r
+      sublicenses to the Covered Code which are properly granted shall\r
+      survive any termination of this License. Provisions which, by\r
+      their nature, must remain in effect beyond the termination of this\r
+      License shall survive.\r
+\r
+   8.2.  If You initiate litigation by asserting a patent infringement claim\r
+      (excluding declatory judgment actions) against Initial Developer or\r
+      a Contributor (the Initial Developer or Contributor against whom You\r
+      file such action is referred to as "Participant")  alleging that:\r
+\r
+      (a) such Participant's Contributor Version directly or indirectly\r
+          infringes any patent, then any and all rights granted by\r
+          such Participant to You under Sections 2.1 and/or 2.2 of this\r
+          License shall, upon 60 days notice from Participant terminate\r
+          prospectively, unless if within 60 days after receipt of\r
+          notice You either: (i)  agree in writing to pay Participant\r
+          a mutually agreeable reasonable royalty for Your past and\r
+          future use of Modifications made by such Participant, or (ii)\r
+          withdraw Your litigation claim with respect to the Contributor\r
+          Version against such Participant.  If within 60 days of notice,\r
+          a reasonable royalty and payment arrangement are not mutually\r
+          agreed upon in writing by the parties or the litigation claim\r
+          is not withdrawn, the rights granted by Participant to You\r
+          under Sections 2.1 and/or 2.2 automatically terminate at the\r
+          expiration of the 60 day notice period specified above.\r
+\r
+      (b) any software, hardware, or device, other than such Participant's\r
+          Contributor Version, directly or indirectly infringes any\r
+          patent, then any rights granted to You by such Participant\r
+          under Sections 2.1(b) and 2.2(b) are revoked effective as of\r
+          the date You first made, used, sold, distributed, or had made,\r
+          Modifications made by that Participant.\r
+\r
+   8.3.  If You assert a patent infringement claim against Participant\r
+      alleging that such Participant's Contributor Version directly\r
+      or indirectly infringes any patent where such claim is resolved\r
+      (such as by license or settlement) prior to the initiation of\r
+      patent infringement litigation, then the reasonable value of the\r
+      licenses granted by such Participant under Sections 2.1 or 2.2\r
+      shall be taken into account in determining the amount or value of\r
+      any payment or license.\r
+\r
+   8.4.  In the event of termination under Sections 8.1 or 8.2 above,  all\r
+      end user license agreements (excluding distributors and resellers)\r
+      which have been validly granted by You or any distributor hereunder\r
+      prior to termination shall survive termination.\r
+\r
+9. LIMITATION OF LIABILITY.\r
+\r
+   UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT\r
+   (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL\r
+   DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,\r
+   OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR\r
+   ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY\r
+   CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,\r
+   WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER\r
+   COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN\r
+   INFORMED OF THE POSSIBILITY OF SUCH DAMAGES.  THIS LIMITATION OF\r
+   LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY\r
+   RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE\r
+   LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE\r
+   EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES,\r
+   SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.\r
+\r
+10. U.S. GOVERNMENT END USERS.\r
+\r
+   The Covered Code is a "commercial item," as that term is defined\r
+   in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer\r
+   software" and "commercial computer software documentation," as\r
+   such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent\r
+   with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4\r
+   (June 1995), all U.S. Government End Users acquire Covered Code with\r
+   only those rights set forth herein.\r
+\r
+11. MISCELLANEOUS.\r
+\r
+   This License represents the complete agreement concerning subject\r
+   matter hereof. If any provision of this License is held to be\r
+   unenforceable, such provision shall be reformed only to the extent\r
+   necessary to make it enforceable.\r
+\r
+   This License shall be governed by California law provisions (except to\r
+   the extent applicable law, if any, provides otherwise), excluding its\r
+   conflict-of-law provisions. With respect to disputes in which at least\r
+   one party is a citizen of, or an entity chartered or registered to do\r
+   business in the United States of America, any litigation relating to\r
+   this License shall be subject to the jurisdiction of the Federal Courts\r
+   of the Northern District of California, with venue lying in Santa\r
+   Clara County, California, with the losing party responsible for costs,\r
+   including without limitation, court costs and reasonable attorneys'\r
+   fees and expenses. The application of the United Nations Convention on\r
+   Contracts for the International Sale of Goods is expressly excluded.\r
+   Any law or regulation which provides that the language of a contract\r
+   shall be construed against the drafter shall not apply to this License.\r
+\r
+12. RESPONSIBILITY FOR CLAIMS.\r
+\r
+   As between Initial Developer and the Contributors, each party is\r
+   responsible for claims and damages arising, directly or indirectly,\r
+   out of its utilization of rights under this License and You agree\r
+   to work with Initial Developer and Contributors to distribute such\r
+   responsibility on an equitable basis.  Nothing herein is intended or\r
+   shall be deemed to constitute any admission of liability.\r
+\r
+13. MULTIPLE-LICENSED CODE.\r
+\r
+   Initial Developer may designate portions of the Covered Code\r
+   as Multiple-Licensed.  Multiple-Licensed means that the Initial\r
+   Developer permits you to utilize portions of the Covered Code under\r
+   Your choice of the MPL or the alternative licenses, if any, specified\r
+   by the Initial Developer in the file described in Exhibit A.\r
+\r
+\r
+EXHIBIT A -Mozilla Public License.\r
+\r
+   ``The contents of this file are subject to the Mozilla Public License\r
+   Version 1.1 (the "License"); you may not use this file except in\r
+   compliance with the License. You may obtain a copy of the License at\r
+\r
+   http://www.mozilla.org/MPL/\r
+\r
+   Software distributed under the License is distributed on an "AS IS"\r
+   basis, WITHOUT WARRANTY OF\r
+\r
+   ANY KIND, either express or implied. See the License for the specific\r
+   language governing rights and limitations under the License.\r
+\r
+   The Original Code is ______________________________________.\r
+\r
+   The Initial Developer of the Original Code is ________________________.\r
+   Portions created by\r
+\r
+   ______________________ are Copyright (C) ______\r
+   _______________________.\r
+   All Rights Reserved.\r
+\r
+   Contributor(s): ______________________________________.\r
+\r
+   Alternatively, the contents of this file may be used under the terms of\r
+   the _____ license (the  [___] License), in which case the provisions of\r
+   [______] License are applicable  instead of those above.  If you wish\r
+   to allow use of your version of this file only under the terms of the\r
+   [____] License and not to allow others to use your version of this\r
+   file under the MPL, indicate your decision by deleting  the provisions\r
+   above and replace  them with the notice and other provisions required\r
+   by the [___] License.  If you do not delete the provisions above,\r
+   a recipient may use your version of this file under either the MPL\r
+   or the [___] License."\r
+\r
+   [NOTE: The text of this Exhibit A may differ slightly from the text\r
+   of the notices in the Source Code files of the Original Code. You\r
+   should use the text of this Exhibit A rather than the text found in\r
+   the Original Code Source Code for Your Modifications.]\r
+\r
+\r
+\r
+===========================================================================\r
+\r
+To the extent any open source components are licensed under the\r
+GPL and/or LGPL, or other similar licenses that require the\r
+source code and/or modifications to source code to be made\r
+available (as would be noted above), you may obtain a copy of\r
+the source code corresponding to the binaries for such open\r
+source components and modifications thereto, if any, (the\r
+"Source Files"), by downloading the Source Files from Pivotal's website at\r
+http://www.pivotal.io/open-source, or by sending a request, \r
+with your name and address to: Pivotal Software, Inc., 3496 Deer Creek Rd, \r
+Palo Alto, CA 94304, Attention: General Counsel. All such requests should \r
+clearly specify: OPEN SOURCE FILES REQUEST, Attention General Counsel. \r
+Pivotal shall mail a copy of the Source Files to you on a CD or equivalent physical medium. \r
+This offer to obtain a copy of the Source Files is valid for three\r
+years from the date you acquired this Software product. \r
+Alternatively, the Source Files may accompany the Pivotal product.\r
+\r
+\r
+[RABBITJMS146GASS110315]
\ No newline at end of file
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/Makefile b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/Makefile
new file mode 100644 (file)
index 0000000..8c8adfb
--- /dev/null
@@ -0,0 +1,15 @@
+PROJECT = rabbitmq_jms_topic_exchange
+
+DEPS = amqp_client
+TEST_DEPS += rabbit
+
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/README.md b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/README.md
new file mode 100644 (file)
index 0000000..9d01c9a
--- /dev/null
@@ -0,0 +1,51 @@
+# RabbitMQ JMS Topic Exchange Plugin
+
+## Overview
+
+This plugin adds server-side support for RabbitMQ JMS client. All JMS-related
+projects are in the process of being open sourced by the RabbitMQ team
+and **should not be used unless the process is complete and announced**.
+
+This plugin is designed to work with the JMS Client for RabbitMQ. It
+supports JMS topic routing and selection based on JMS SQL selection
+rules.
+
+This implementation is based upon the Java Messaging Service
+Specification Version 1.1, see [The JMS
+Specs](http://www.oracle.com/technetwork/java/docs-136352.html) for a
+copy of that specification.
+
+## Design
+
+The plugin this generates is a user-written exchange type for RabbitMQ
+client use. The exchange type name is "`x_jms_topic`" but this is _not_
+a topic exchange. Instead it works together with a standard topic
+exchange to provide the JMS topic selection function.
+
+When JMS Selectors are used on a Topic Destination consumer, the
+destination (queue) is bound to an exchange of type `x_jms_topic`, with
+arguments that indicate what the selection criteria are. The
+`x_jms_topic` exchange is, in turn, bound to the standard Topic Exchange
+used by JMS messaging (this uses the RabbitMQ exchange-to-exchange
+binding extension to the AMQP 0-9-1 protocol).
+
+In this way, normal topic routing can occur, with the overhead of
+selection only applying when selection is used, and _after_ the routing
+and filtering implied by the topic name.
+
+## Building From Source
+
+Building is no different from [building other RabbitMQ plugins](http://www.rabbitmq.com/plugin-development.html).
+
+TL;DR:
+
+    git clone https://github.com/rabbitmq/rabbitmq-jms-topic-exchange.git
+    cd rabbitmq-jms-topic-exchange
+    make -j dist
+    ls plugins/*
+    
+## Copyright and License
+
+(c) Pivotal Software Inc., 2007-2016.
+
+See [LICENSE](./LICENSE) for license information.
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/erlang.mk b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/erlang.mk
new file mode 100644 (file)
index 0000000..9f0c0c3
--- /dev/null
@@ -0,0 +1,6589 @@
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app deps search rel docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+
+ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+gen_verbose_0 = @echo " GEN   " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A0 -noinput -boot start_clean
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+       $(verbose) :
+
+check:: clean app tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+       $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+distclean-tmp:
+       $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+       $(verbose) printf "%s\n" \
+               "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+               "Copyright (c) 2013-2015 Loïc Hoguin <essen@ninenines.eu>" \
+               "" \
+               "Usage: [V=1] $(MAKE) [target]..." \
+               "" \
+               "Core targets:" \
+               "  all           Run deps, app and rel targets in that order" \
+               "  app           Compile the project" \
+               "  deps          Fetch dependencies (if needed) and compile them" \
+               "  fetch-deps    Fetch dependencies (if needed) without compiling them" \
+               "  list-deps     Fetch dependencies (if needed) and list them" \
+               "  search q=...  Search for a package in the built-in index" \
+               "  rel           Build a release for this project, if applicable" \
+               "  docs          Build the documentation for this project" \
+               "  install-docs  Install the man pages for this project" \
+               "  check         Compile and run all tests and analysis for this project" \
+               "  tests         Run the tests for this project" \
+               "  clean         Delete temporary and output files from most targets" \
+               "  distclean     Delete all temporary and output files" \
+               "  help          Display this help and exit" \
+               "  erlang-mk     Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty)        $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $(2) -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(subst ",\",$(1)))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(subst \,\\\\,$(shell cygpath -w $1))
+else
+core_native_path = $1
+endif
+
+ifeq ($(shell which wget 2>/dev/null | wc -l), 1)
+define core_http_get
+       wget --no-check-certificate -O $(1) $(2)|| rm $(1)
+endef
+else
+define core_http_get.erl
+       ssl:start(),
+       inets:start(),
+       case httpc:request(get, {"$(2)", []}, [{autoredirect, true}], []) of
+               {ok, {{_, 200, _}, _, Body}} ->
+                       case file:write_file("$(1)", Body) of
+                               ok -> ok;
+                               {error, R1} -> halt(R1)
+                       end;
+               {error, R2} ->
+                       halt(R2)
+       end,
+       halt(0).
+endef
+
+define core_http_get
+       $(call erlang,$(call core_http_get.erl,$(call core_native_path,$1),$2))
+endef
+endif
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) -type f -name $(subst *,\*,$2)))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk:
+       git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ifdef ERLANG_MK_COMMIT
+       cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+endif
+       if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+       $(MAKE) -C $(ERLANG_MK_BUILD_DIR)
+       cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+       rm -rf $(ERLANG_MK_BUILD_DIR)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = 1.0.4
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/riverrun/branglecrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/riverrun/branglecrypt
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another a key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = master
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = v0.1.2
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY.  It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += classifier
+pkg_classifier_name = classifier
+pkg_classifier_description = An Erlang Bayesian Filter and Text Classifier
+pkg_classifier_homepage = https://github.com/inaka/classifier
+pkg_classifier_fetch = git
+pkg_classifier_repo = https://github.com/inaka/classifier
+pkg_classifier_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.1
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.1
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dhtcrawler
+pkg_dhtcrawler_name = dhtcrawler
+pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
+pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
+pkg_dhtcrawler_fetch = git
+pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
+pkg_dhtcrawler_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dtl
+pkg_dtl_name = dtl
+pkg_dtl_description = Django Template Language: A full-featured port of the Django template engine to Erlang.
+pkg_dtl_homepage = https://github.com/oinksoft/dtl
+pkg_dtl_fetch = git
+pkg_dtl_repo = https://github.com/oinksoft/dtl
+pkg_dtl_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += eganglia
+pkg_eganglia_name = eganglia
+pkg_eganglia_description = Erlang library to interact with Ganglia
+pkg_eganglia_homepage = https://github.com/inaka/eganglia
+pkg_eganglia_fetch = git
+pkg_eganglia_repo = https://github.com/inaka/eganglia
+pkg_eganglia_commit = v0.9.1
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = 2.0.4
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/knutin/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/knutin/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = 0.2.4
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is Pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = 0.1.1
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = exec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = 1.2
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = v1.4.6
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - a Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards 'in' and 'beetween' for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gossiperl
+pkg_gossiperl_name = gossiperl
+pkg_gossiperl_description = Gossip middleware in Erlang
+pkg_gossiperl_homepage = http://gossiperl.com/
+pkg_gossiperl_fetch = git
+pkg_gossiperl_repo = https://github.com/gossiperl/gossiperl
+pkg_gossiperl_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = v4.1.1
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = 0.6.0
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/klarna/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/klarna/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = 0.3.3
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = 0.3
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficient decode and encode JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/basho/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/basho/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/basho/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/basho/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = 0.1.0
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = Erlang driver for RethinkDB
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = Erlang MySQL Driver (from code.google.com)
+pkg_mysql_homepage = https://github.com/dizzyd/erlang-mysql-driver
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/dizzyd/erlang-mysql-driver
+pkg_mysql_commit = master
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += oauth2c
+pkg_oauth2c_name = oauth2c
+pkg_oauth2c_description = Erlang OAuth2 Client
+pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
+pkg_oauth2c_fetch = git
+pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
+pkg_oauth2c_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = 1.0.0
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = 0.3
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = 0.4.0
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.1.0
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = 2.2.1
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = 0.1.0
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool: stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global process registry for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an id generator for message service.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://github.com/krestenkrab/triq
+pkg_triq_fetch = git
+pkg_triq_repo = https://github.com/krestenkrab/triq
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = 0.3.0
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = v1.4.0
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = 1.0.3
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = 0.2.0
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = zab propotocol implement by erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit =  
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang. Read more on zippers
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+define pkg_print
+       $(verbose) printf "%s\n" \
+               $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name:    $(1)") \
+               "App name:    $(pkg_$(1)_name)" \
+               "Description: $(pkg_$(1)_description)" \
+               "Home page:   $(pkg_$(1)_homepage)" \
+               "Fetch with:  $(pkg_$(1)_fetch)" \
+               "Repository:  $(pkg_$(1)_repo)" \
+               "Commit:      $(pkg_$(1)_commit)" \
+               ""
+
+endef
+
+search:
+ifdef q
+       $(foreach p,$(PACKAGES), \
+               $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+                       $(call pkg_print,$(p))))
+else
+       $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+dep_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+dep_repo = $(patsubst git://github.com/%,https://github.com/%, \
+       $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo)))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+       ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+       ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP   " $(1);
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS)
+ifndef IS_APP
+       $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+               $(MAKE) -C $$dep IS_APP=1 || exit $$?; \
+       done
+endif
+ifneq ($(IS_DEP),1)
+       $(verbose) rm -f $(ERLANG_MK_TMP)/deps.log
+endif
+       $(verbose) mkdir -p $(ERLANG_MK_TMP)
+       $(verbose) for dep in $(ALL_DEPS_DIRS) ; do \
+               if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+                       :; \
+               else \
+                       echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+                       if [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+                               $(MAKE) -C $$dep IS_DEP=1 || exit $$?; \
+                       else \
+                               echo "Error: No Makefile to build dependency $$dep."; \
+                               exit 2; \
+                       fi \
+               fi \
+       done
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While Makefile file could be GNUmakefile or makefile,
+# in practice only Makefile is needed so far.
+define dep_autopatch
+       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+               if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+                       $(call dep_autopatch2,$(1)); \
+               elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
+                       $(call dep_autopatch2,$(1)); \
+               elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
+                       $(call dep_autopatch2,$(1)); \
+               else \
+                       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+                               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+                               $(call dep_autopatch_erlang_mk,$(1)); \
+                       else \
+                               $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
+                       fi \
+               fi \
+       else \
+               if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+                       $(call dep_autopatch_noop,$(1)); \
+               else \
+                       $(call dep_autopatch2,$(1)); \
+               fi \
+       fi
+endef
+
+define dep_autopatch2
+       $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+       if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+               $(call dep_autopatch_fetch_rebar); \
+               $(call dep_autopatch_rebar,$(1)); \
+       else \
+               $(call dep_autopatch_gen,$(1)); \
+       fi
+endef
+
+define dep_autopatch_noop
+       printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Overwrite erlang.mk with the current file by default.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+       echo "include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(DEPS_DIR)/app)/erlang.mk" \
+               > $(DEPS_DIR)/$1/erlang.mk
+endef
+else
+define dep_autopatch_erlang_mk
+       :
+endef
+endif
+
+define dep_autopatch_gen
+       printf "%s\n" \
+               "ERLC_OPTS = +debug_info" \
+               "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+define dep_autopatch_fetch_rebar
+       mkdir -p $(ERLANG_MK_TMP); \
+       if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+               git clone -q -n -- https://github.com/rebar/rebar $(ERLANG_MK_TMP)/rebar; \
+               cd $(ERLANG_MK_TMP)/rebar; \
+               git checkout -q 791db716b5a3a7671e0b351f95ddf24b848ee173; \
+               $(MAKE); \
+               cd -; \
+       fi
+endef
+
+define dep_autopatch_rebar
+       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+               mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+       fi; \
+       $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+       rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+       application:load(rebar),
+       application:set_env(rebar, log_level, debug),
+       Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+               {ok, Conf0} -> Conf0;
+               _ -> []
+       end,
+       {Conf, OsEnv} = fun() ->
+               case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+                       false -> {Conf1, []};
+                       true ->
+                               Bindings0 = erl_eval:new_bindings(),
+                               Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+                               Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+                               Before = os:getenv(),
+                               {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+                               {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+               end
+       end(),
+       Write = fun (Text) ->
+               file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+       end,
+       Escape = fun (Text) ->
+               re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+       end,
+       Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+               "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+       Write("C_SRC_DIR = /path/do/not/exist\n"),
+       Write("C_SRC_TYPE = rebar\n"),
+       Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+       Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+       fun() ->
+               Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+               case lists:keyfind(erl_opts, 1, Conf) of
+                       false -> ok;
+                       {_, ErlOpts} ->
+                               lists:foreach(fun
+                                       ({d, D}) ->
+                                               Write("ERLC_OPTS += -D" ++ atom_to_list(D) ++ "=1\n");
+                                       ({i, I}) ->
+                                               Write(["ERLC_OPTS += -I ", I, "\n"]);
+                                       ({platform_define, Regex, D}) ->
+                                               case rebar_utils:is_arch(Regex) of
+                                                       true -> Write("ERLC_OPTS += -D" ++ atom_to_list(D) ++ "=1\n");
+                                                       false -> ok
+                                               end;
+                                       ({parse_transform, PT}) ->
+                                               Write("ERLC_OPTS += +'{parse_transform, " ++ atom_to_list(PT) ++ "}'\n");
+                                       (_) -> ok
+                               end, ErlOpts)
+               end,
+               Write("\n")
+       end(),
+       fun() ->
+               File = case lists:keyfind(deps, 1, Conf) of
+                       false -> [];
+                       {_, Deps} ->
+                               [begin case case Dep of
+                                                       {N, S} when is_atom(N), is_list(S) -> {N, {hex, S}};
+                                                       {N, S} when is_tuple(S) -> {N, S};
+                                                       {N, _, S} -> {N, S};
+                                                       {N, _, S, _} -> {N, S};
+                                                       _ -> false
+                                               end of
+                                       false -> ok;
+                                       {Name, Source} ->
+                                               {Method, Repo, Commit} = case Source of
+                                                       {hex, V} -> {hex, V, undefined};
+                                                       {git, R} -> {git, R, master};
+                                                       {M, R, {branch, C}} -> {M, R, C};
+                                                       {M, R, {ref, C}} -> {M, R, C};
+                                                       {M, R, {tag, C}} -> {M, R, C};
+                                                       {M, R, C} -> {M, R, C}
+                                               end,
+                                               Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+                               end end || Dep <- Deps]
+               end
+       end(),
+       fun() ->
+               case lists:keyfind(erl_first_files, 1, Conf) of
+                       false -> ok;
+                       {_, Files} ->
+                               Names = [[" ", case lists:reverse(F) of
+                                       "lre." ++ Elif -> lists:reverse(Elif);
+                                       Elif -> lists:reverse(Elif)
+                               end] || "src/" ++ F <- Files],
+                               Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+               end
+       end(),
+       FindFirst = fun(F, Fd) ->
+               case io:parse_erl_form(Fd, undefined) of
+                       {ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
+                               [PT, F(F, Fd)];
+                       {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
+                               case proplists:get_value(parse_transform, CompileOpts) of
+                                       undefined -> [F(F, Fd)];
+                                       PT -> [PT, F(F, Fd)]
+                               end;
+                       {ok, {attribute, _, include, Hrl}, _} ->
+                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
+                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+                                       _ ->
+                                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
+                                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+                                                       _ -> [F(F, Fd)]
+                                               end
+                               end;
+                       {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
+                               {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
+                               [F(F, HrlFd), F(F, Fd)];
+                       {ok, {attribute, _, include_lib, Hrl}, _} ->
+                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
+                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+                                       _ -> [F(F, Fd)]
+                               end;
+                       {ok, {attribute, _, import, {Imp, _}}, _} ->
+                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
+                                       {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
+                                       _ -> [F(F, Fd)]
+                               end;
+                       {eof, _} ->
+                               file:close(Fd),
+                               [];
+                       _ ->
+                               F(F, Fd)
+               end
+       end,
+       fun() ->
+               ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
+               First0 = lists:usort(lists:flatten([begin
+                       {ok, Fd} = file:open(F, [read]),
+                       FindFirst(FindFirst, Fd)
+               end || F <- ErlFiles])),
+               First = lists:flatten([begin
+                       {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
+                       FindFirst(FindFirst, Fd)
+               end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
+               Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
+                       lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
+       end(),
+       Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+       Write("\npreprocess::\n"),
+       Write("\npre-deps::\n"),
+       Write("\npre-app::\n"),
+       PatchHook = fun(Cmd) ->
+               case Cmd of
+                       "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+                       "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+                       "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+                       "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+                       _ -> Escape(Cmd)
+               end
+       end,
+       fun() ->
+               case lists:keyfind(pre_hooks, 1, Conf) of
+                       false -> ok;
+                       {_, Hooks} ->
+                               [case H of
+                                       {'get-deps', Cmd} ->
+                                               Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+                                       {compile, Cmd} ->
+                                               Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+                                       {Regex, compile, Cmd} ->
+                                               case rebar_utils:is_arch(Regex) of
+                                                       true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+                                                       false -> ok
+                                               end;
+                                       _ -> ok
+                               end || H <- Hooks]
+               end
+       end(),
+       ShellToMk = fun(V) ->
+               re:replace(re:replace(V, "(\\\\$$)(\\\\w*)", "\\\\1(\\\\2)", [global]),
+                       "-Werror\\\\b", "", [{return, list}, global])
+       end,
+       PortSpecs = fun() ->
+               case lists:keyfind(port_specs, 1, Conf) of
+                       false ->
+                               case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+                                       false -> [];
+                                       true ->
+                                               [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+                                                       proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+                               end;
+                       {_, Specs} ->
+                               lists:flatten([case S of
+                                       {Output, Input} -> {ShellToMk(Output), Input, []};
+                                       {Regex, Output, Input} ->
+                                               case rebar_utils:is_arch(Regex) of
+                                                       true -> {ShellToMk(Output), Input, []};
+                                                       false -> []
+                                               end;
+                                       {Regex, Output, Input, [{env, Env}]} ->
+                                               case rebar_utils:is_arch(Regex) of
+                                                       true -> {ShellToMk(Output), Input, Env};
+                                                       false -> []
+                                               end
+                               end || S <- Specs])
+               end
+       end(),
+       PortSpecWrite = fun (Text) ->
+               file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+       end,
+       case PortSpecs of
+               [] -> ok;
+               _ ->
+                       Write("\npre-app::\n\t$$\(MAKE) -f c_src/Makefile.erlang.mk\n"),
+                       PortSpecWrite(io_lib:format("ERL_CFLAGS = -finline-functions -Wall -fPIC -I ~s/erts-~s/include -I ~s\n",
+                               [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+                       PortSpecWrite(io_lib:format("ERL_LDFLAGS = -L ~s -lerl_interface -lei\n",
+                               [code:lib_dir(erl_interface, lib)])),
+                       [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+                       FilterEnv = fun(Env) ->
+                               lists:flatten([case E of
+                                       {_, _} -> E;
+                                       {Regex, K, V} ->
+                                               case rebar_utils:is_arch(Regex) of
+                                                       true -> {K, V};
+                                                       false -> []
+                                               end
+                               end || E <- Env])
+                       end,
+                       MergeEnv = fun(Env) ->
+                               lists:foldl(fun ({K, V}, Acc) ->
+                                       case lists:keyfind(K, 1, Acc) of
+                                               false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+                                               {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+                                       end
+                               end, [], Env)
+                       end,
+                       PortEnv = case lists:keyfind(port_env, 1, Conf) of
+                               false -> [];
+                               {_, PortEnv0} -> FilterEnv(PortEnv0)
+                       end,
+                       PortSpec = fun ({Output, Input0, Env}) ->
+                               filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+                               Input = [[" ", I] || I <- Input0],
+                               PortSpecWrite([
+                                       [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+                                       case $(PLATFORM) of
+                                               darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+                                               _ -> ""
+                                       end,
+                                       "\n\nall:: ", Output, "\n\n",
+                                       "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+                                       "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+                                       "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+                                       "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+                                       [[Output, ": ", K, " = ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+                                       Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+                                               "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+                                       "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+                                       case filename:extension(Output) of
+                                               [] -> "\n";
+                                               _ -> " -shared\n"
+                                       end])
+                       end,
+                       [PortSpec(S) || S <- PortSpecs]
+       end,
+       Write("\ninclude $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(DEPS_DIR)/app)/erlang.mk"),
+       RunPlugin = fun(Plugin, Step) ->
+               case erlang:function_exported(Plugin, Step, 2) of
+                       false -> ok;
+                       true ->
+                               c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+                               Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+                                       dict:store(base_dir, "", dict:new())}, undefined),
+                               io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+               end
+       end,
+       fun() ->
+               case lists:keyfind(plugins, 1, Conf) of
+                       false -> ok;
+                       {_, Plugins} ->
+                               [begin
+                                       case lists:keyfind(deps, 1, Conf) of
+                                               false -> ok;
+                                               {_, Deps} ->
+                                                       case lists:keyfind(P, 1, Deps) of
+                                                               false -> ok;
+                                                               _ ->
+                                                                       Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+                                                                       io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+                                                                       io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+                                                                       code:add_patha(Path ++ "/ebin")
+                                                       end
+                                       end
+                               end || P <- Plugins],
+                               [case code:load_file(P) of
+                                       {module, P} -> ok;
+                                       _ ->
+                                               case lists:keyfind(plugin_dir, 1, Conf) of
+                                                       false -> ok;
+                                                       {_, PluginsDir} ->
+                                                               ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+                                                               {ok, P, Bin} = compile:file(ErlFile, [binary]),
+                                                               {module, P} = code:load_binary(P, ErlFile, Bin)
+                                               end
+                               end || P <- Plugins],
+                               [RunPlugin(P, preprocess) || P <- Plugins],
+                               [RunPlugin(P, pre_compile) || P <- Plugins],
+                               [RunPlugin(P, compile) || P <- Plugins]
+               end
+       end(),
+       halt()
+endef
+
+define dep_autopatch_app.erl
+       UpdateModules = fun(App) ->
+               case filelib:is_regular(App) of
+                       false -> ok;
+                       true ->
+                               {ok, [{application, '$(1)', L0}]} = file:consult(App),
+                               Mods = filelib:fold_files("$(call core_native_path,$(DEPS_DIR)/$1/src)", "\\\\.erl$$", true,
+                                       fun (F, Acc) -> [list_to_atom(filename:rootname(filename:basename(F)))|Acc] end, []),
+                               L = lists:keystore(modules, 1, L0, {modules, Mods}),
+                               ok = file:write_file(App, io_lib:format("~p.~n", [{application, '$(1)', L}]))
+               end
+       end,
+       UpdateModules("$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"),
+       halt()
+endef
+
+define dep_autopatch_appsrc.erl
+       AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+       AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+       case filelib:is_regular(AppSrcIn) of
+               false -> ok;
+               true ->
+                       {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+                       L1 = lists:keystore(modules, 1, L0, {modules, []}),
+                       L2 = case lists:keyfind(vsn, 1, L1) of {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, "git"}); _ -> L1 end,
+                       L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+                       ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+                       case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+       end,
+       halt()
+endef
+
+# --- Dependency fetch methods -------------------------------------------
+# Each dep_fetch_<method> macro expands to shell commands that place the
+# dependency's sources under $(DEPS_DIR)/<name>; dep_repo/dep_name/
+# dep_commit extract the fields of the dep_<name> specification.
+
+# git: clone without checkout (-n), then check out the requested commit.
+define dep_fetch_git
+	git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+	cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+# git submodule: sources come from an already-registered submodule.
+define dep_fetch_git-submodule
+	git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+# Mercurial: clone without a working copy (-U), then update to the revision.
+define dep_fetch_hg
+	hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+	cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+# Subversion: plain checkout of the repository URL.
+define dep_fetch_svn
+	svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# cp: the "repository" is a local path, copied recursively.
+define dep_fetch_cp
+	cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Erlang snippet run by dep_fetch_hex below: downloads the $(1)-$(2).tar
+# package tarball from hex.pm's S3 bucket over HTTPS and extracts the
+# embedded contents.tar.gz into the dependency directory.
+define dep_fetch_hex.erl
+	ssl:start(),
+	inets:start(),
+	{ok, {{_, 200, _}, _, Body}} = httpc:request(get,
+		{"https://s3.amazonaws.com/s3.hex.pm/tarballs/$(1)-$(2).tar", []},
+		[], [{body_format, binary}]),
+	{ok, Files} = erl_tar:extract({binary, Body}, [memory]),
+	{_, Source} = lists:keyfind("contents.tar.gz", 1, Files),
+	ok = erl_tar:extract({binary, Source}, [{cwd, "$(call core_native_path,$(DEPS_DIR)/$1)"}, compressed]),
+	halt()
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+	$(call erlang,$(call dep_fetch_hex.erl,$(1),$(strip $(word 2,$(dep_$(1))))));
+endef
+
+# Reported when no fetch method could be resolved for the dependency.
+define dep_fetch_fail
+	echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+	exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+# Legacy format: dep_<name> = <repo-url> [<commit>]; commit defaults to master.
+define dep_fetch_legacy
+	$(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+	git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+	cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+# Resolves the fetch method name for dependency $(1): the first word of
+# dep_<name> when it names a known dep_fetch_* macro; "legacy" when built
+# as a dependency (IS_DEP) with an unrecognised first word; the package
+# index entry ($(pkg_<name>_fetch)) for known packages; "fail" otherwise.
+define dep_fetch
+	$(if $(dep_$(1)), \
+		$(if $(dep_fetch_$(word 1,$(dep_$(1)))), \
+			$(word 1,$(dep_$(1))), \
+			$(if $(IS_DEP),legacy,fail)), \
+		$(if $(filter $(1),$(PACKAGES)), \
+			$(pkg_$(1)_fetch), \
+			fail))
+endef
+
+# Generates the make rule that fetches and prepares one dependency $(1):
+#   * refuses names that clash with an application in $(APPS_DIR);
+#   * fetches the sources via the method resolved by dep_fetch;
+#   * runs autoreconf/configure when autotools input files are present
+#     (configure failures are tolerated: the recipe line is "-" prefixed);
+#   * unless listed in NO_AUTOPATCH, autopatches the result, with special
+#     handling for RabbitMQ's amqp_client/rabbit trees when the
+#     RABBITMQ_CLIENT_PATCH / RABBITMQ_SERVER_PATCH variables are set.
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1):
+	$(eval DEP_NAME := $(call dep_name,$1))
+	$(eval DEP_STR := $(if $(filter-out $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+	$(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+		echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)."; \
+		exit 17; \
+	fi
+	$(verbose) mkdir -p $(DEPS_DIR)
+	$(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
+	$(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
+		echo " AUTO  " $(DEP_STR); \
+		cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+	fi
+	- $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+		echo " CONF  " $(DEP_STR); \
+		cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+	fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+	$(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+			echo " PATCH  Downloading rabbitmq-codegen"; \
+			git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+			echo " PATCH  Downloading rabbitmq-server"; \
+			git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+		fi; \
+		ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+	elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+			echo " PATCH  Downloading rabbitmq-codegen"; \
+			git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi \
+	else \
+		$$(call dep_autopatch,$(DEP_NAME)) \
+	fi
+endif
+endef
+
+# Instantiate a fetch rule for every build-time and runtime dependency.
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+# Recurse clean/distclean into every local application directory.
+# IS_APP=1 is passed down so the sub-make does not recurse again.
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+	$(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+		$(MAKE) -C $$dep clean IS_APP=1 || exit $$?; \
+	done
+
+distclean:: distclean-apps
+
+distclean-apps:
+	$(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+		$(MAKE) -C $$dep distclean IS_APP=1 || exit $$?; \
+	done
+endif
+
+# distclean removes the whole dependency tree unless SKIP_DEPS is set.
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+	$(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/list-deps.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/list-doc-deps.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/list-rel-deps.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/list-test-deps.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/list-shell-deps.log
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+# $(1) is the plugin makefile path below $(DEPS_DIR); $(2) the dependency
+# directory that provides it. The empty-recipe rule makes the -include
+# wait until the dependency has been fetched.
+define core_dep_plugin
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endef
+
+# A DEP_PLUGINS entry containing "/" is used verbatim (dep name taken from
+# the first path component); a bare name means <name>/plugins.mk.
+$(foreach p,$(DEP_PLUGINS),\
+	$(eval $(if $(findstring /,$p),\
+		$(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+		$(call core_dep_plugin,$p/plugins.mk,$p))))
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+# DTL_FULL_PATH: when non-empty, module names encode the template's path
+# (slashes become underscores) instead of just its basename.
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_SUFFIX ?= _dtl
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL   " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+# Compiles each .dtl file to a module <name>$(DTL_SUFFIX) in ebin/ using
+# erlydtl; accepts both ok and {ok, _} results, anything else crashes.
+define erlydtl_compile.erl
+	[begin
+		Module0 = case "$(strip $(DTL_FULL_PATH))" of
+			"" ->
+				filename:basename(F, ".dtl");
+			_ ->
+				"$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+				re:replace(F2, "/",  "_",  [{return, list}, global])
+		end,
+		Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+		case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+			ok -> ok;
+			{ok, _} -> ok
+		end
+	end || F <- string:tokens("$(1)", " ")],
+	halt().
+endef
+
+ifneq ($(wildcard src/),)
+
+DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifdef DTL_FULL_PATH
+BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
+else
+BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES))))
+endif
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+# The stamp file's recipe touches every template so $? picks them all up
+# on the next run; first run only creates the stamp.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
+	@mkdir -p $(ERLANG_MK_TMP)
+	@if test -f $@; then \
+		touch $(DTL_FILES); \
+	fi
+	@touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+# Only newer-than-target templates ($?) are recompiled.
+ebin/$(PROJECT).app:: $(DTL_FILES)
+	$(if $(strip $?),\
+		$(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+# Generates Erlang sources from .proto files, compiles them straight into
+# ebin/, then removes the intermediate .erl files.
+define compile_proto
+	$(verbose) mkdir -p ebin/ include/
+	$(proto_verbose) $(call erlang,$(call compile_proto.erl,$(1)))
+	$(proto_verbose) erlc +debug_info -o ebin/ ebin/*.erl
+	$(verbose) rm ebin/*.erl
+endef
+
+# Erlang snippet: runs protobuffs_compile on each file, emitting headers
+# next to the project's include/ and sources next to ebin/.
+define compile_proto.erl
+	[begin
+		Dir = filename:dirname(filename:dirname(F)),
+		protobuffs_compile:generate_source(F,
+			[{output_include_dir, Dir ++ "/include"},
+				{output_src_dir, Dir ++ "/ebin"}])
+	end || F <- string:tokens("$(1)", " ")],
+	halt().
+endef
+
+ifneq ($(wildcard src/),)
+ebin/$(PROJECT).app:: $(sort $(call core_find,src/,*.proto))
+	$(if $(strip $?),$(call compile_proto,$?))
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+# Default compiler options for application code. COMPILE_FIRST lists
+# modules that must be compiled before the rest (e.g. behaviours);
+# ERLC_EXCLUDE removes modules from compilation entirely.
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+	+warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+# Per-tool verbosity prefixes selected by $(V):
+# V=0 prints a short pretty line, V=2 traces recipes with "set -x".
+app_verbose_0 = @echo " APP   " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP   " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC  " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+	$(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL  " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1  " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB   " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+# ebin/test is a stamp left by test-build; when it exists the previous
+# build used TEST_ERLC_OPTS, so force a clean rebuild for the app target.
+ifeq ($(wildcard ebin/test),)
+app:: deps $(PROJECT).d
+	$(verbose) $(MAKE) --no-print-directory app-build
+else
+app:: clean deps $(PROJECT).d
+	$(verbose) $(MAKE) --no-print-directory app-build
+endif
+
+# Template for the generated .app resource file. The variant used when
+# src/<project>_app.erl exists additionally declares the application
+# callback module ({mod, ...}) and registered processes. $(1) is the git
+# describe string (only with IS_DEP), $(2) the module list.
+ifeq ($(wildcard src/$(PROJECT)_app.erl),)
+define app_file
+{application, $(PROJECT), [
+	{description, "$(PROJECT_DESCRIPTION)"},
+	{vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+	{id$(comma)$(space)"$(1)"}$(comma))
+	{modules, [$(call comma_list,$(2))]},
+	{registered, []},
+	{applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]}
+]}.
+endef
+else
+define app_file
+{application, $(PROJECT), [
+	{description, "$(PROJECT_DESCRIPTION)"},
+	{vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+	{id$(comma)$(space)"$(1)"}$(comma))
+	{modules, [$(call comma_list,$(2))]},
+	{registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+	{applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]},
+	{mod, {$(PROJECT)_app, []}}
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+	$(verbose) :
+
+# Source files.
+
+ERL_FILES = $(sort $(call core_find,src/,*.erl))
+CORE_FILES = $(sort $(call core_find,src/,*.core))
+
+# ASN.1 files.
+
+# ASN.1 compilation emits .erl into src/ (picked up by the normal erlc
+# pass) and headers/db files into include/.
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+	$(verbose) mkdir -p include/
+	$(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(1)
+	$(verbose) mv asn1/*.erl src/
+	$(verbose) mv asn1/*.hrl include/
+	$(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+	$(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+# MIBs compile to priv/mibs/*.bin, then to include/*.hrl headers.
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+	$(verbose) mkdir -p include/ priv/mibs/
+	$(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+	$(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+# .xrl/.yrl generate .erl files into src/, added to ERL_FILES so they are
+# compiled (and removed again by clean-app).
+XRL_FILES = $(sort $(call core_find,src/,*.xrl))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES = $(sort $(call core_find,src/,*.yrl))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+	$(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $?)
+# Erlang and Core Erlang files.
+
+# Erlang snippet that builds $(PROJECT).d: it parses every .erl file with
+# epp to discover behaviours, parse_transforms and file inclusions, and
+# writes makefile rules ("module:: deps; @touch $@") for each; modules
+# that other modules depend on are appended to COMPILE_FIRST.
+define makedep.erl
+	ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+	Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
+	Add = fun (Dep, Acc) ->
+		case lists:keyfind(atom_to_list(Dep), 1, Modules) of
+			{_, DepFile} -> [DepFile|Acc];
+			false -> Acc
+		end
+	end,
+	AddHd = fun (Dep, Acc) ->
+		case {Dep, lists:keymember(Dep, 2, Modules)} of
+			{"src/" ++ _, false} -> [Dep|Acc];
+			{"include/" ++ _, false} -> [Dep|Acc];
+			_ -> Acc
+		end
+	end,
+	CompileFirst = fun (Deps) ->
+		First0 = [case filename:extension(D) of
+			".erl" -> filename:basename(D, ".erl");
+			_ -> []
+		end || D <- Deps],
+		case lists:usort(First0) of
+			[] -> [];
+			[[]] -> [];
+			First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
+		end
+	end,
+	Depend = [begin
+		case epp:parse_file(F, ["include/"], []) of
+			{ok, Forms} ->
+				Deps = lists:usort(lists:foldl(fun
+					({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
+					({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
+					({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
+					({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
+					(_, Acc) -> Acc
+				end, [], Forms)),
+				case Deps of
+					[] -> "";
+					_ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
+				end;
+			{error, enoent} ->
+				[]
+		end
+	end || F <- ErlFiles],
+	ok = file:write_file("$(1)", Depend),
+	halt()
+endef
+
+# Regenerate the .d file unless NO_MAKEDEP is set and it already exists.
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+	$(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+# Same stamp-file trick as for erlydtl: after the first run, a Makefile
+# change touches every source so $? includes them all.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST)
+	@mkdir -p $(ERLANG_MK_TMP)
+	@if test -f $@; then \
+		touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+		touch -c $(PROJECT).d; \
+	fi
+	@touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+
+-include $(PROJECT).d
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+	$(verbose) mkdir -p ebin/
+
+# -Werror is stripped when built as a dependency (IS_DEP) so warnings in
+# third-party code do not break the build.
+define compile_erl
+	$(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+		-pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+# Compiles changed sources, then writes ebin/<project>.app: either fully
+# generated from the app_file template (no .app.src) or produced by
+# substituting the modules list and git id into src/<project>.app.src.
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+	$(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+	$(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+	$(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null || true))
+	$(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+		$(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+	$(app_verbose) printf "$(subst $(newline),\n,$(subst ",\",$(call app_file,$(GITDESCRIBE),$(MODULES))))" \
+		> ebin/$(PROJECT).app
+else
+	$(verbose) if [ -z "$$(grep -E '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+		echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk README for instructions." >&2; \
+		exit 1; \
+	fi
+	$(appsrc_verbose) cat src/$(PROJECT).app.src \
+		| sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+		| sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(GITDESCRIBE)\"}/" \
+		> ebin/$(PROJECT).app
+endif
+
+clean:: clean-app
+
+# Removes build output plus every generated source/header (xrl/yrl/asn1).
+clean-app:
+	$(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+		$(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+		$(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+		$(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+		$(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+# Fetch rules for documentation-only dependencies; doc-deps builds each
+# one (and becomes a no-op when SKIP_DEPS is set).
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+	$(verbose) for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+# Same pattern for release-only dependencies.
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+	$(verbose) for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+# Test builds keep warnings non-fatal (no -Werror) and define TEST=1.
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+	$(verbose) for dep in $(ALL_TEST_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Compiles test sources in place, against the application's ebin/.
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir:
+	$(gen_verbose) erlc -v $(TEST_ERLC_OPTS) -I include/ -o $(TEST_DIR) \
+		$(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
+endif
+
+# As with app: the ebin/test stamp records that the last build used test
+# options; absent stamp means a clean rebuild is required first.
+ifeq ($(wildcard ebin/test),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps $(PROJECT).d
+	$(verbose) $(MAKE) --no-print-directory app-build test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+	$(gen_verbose) touch ebin/test
+else
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: deps test-deps $(PROJECT).d
+	$(verbose) $(MAKE) --no-print-directory app-build test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+	$(gen_verbose) rm -f $(TEST_DIR)/*.beam
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+# Normalises ", " separators so each option becomes one word.
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/')
+
+# Keeps only +option style erlc flags and drops the leading "+" so they
+# can be listed as atoms in rebar's erl_opts.
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+	$(if $(findstring +,$1),\
+		$(shell echo $1 | cut -b 2-)))
+endef
+
+# rebar.config equivalent of this project's DEPS and ERLC_OPTS.
+define compat_rebar_config
+{deps, [$(call comma_list,$(foreach d,$(DEPS),\
+	{$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
+{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
+	$(call compat_convert_erlc_opts,$o)))]}.
+endef
+
+# Export via the environment so multi-line content survives the shell.
+$(eval _compat_rebar_config = $$(compat_rebar_config))
+$(eval export _compat_rebar_config)
+
+rebar.config:
+	$(gen_verbose) echo "$${_compat_rebar_config}" > rebar.config
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+
+docs:: asciidoc
+
+asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+
+# Guide: built with a2x to PDF and chunked HTML when the book source exists.
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide:
+	a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+	a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+endif
+
+# Manual pages: each .asciidoc becomes a man page, sorted into doc/man<N>/
+# and gzipped.
+ifeq ($(wildcard doc/src/manual/*.asciidoc),)
+asciidoc-manual:
+else
+asciidoc-manual:
+	for f in doc/src/manual/*.asciidoc ; do \
+		a2x -v -f manpage $$f ; \
+	done
+	for s in $(MAN_SECTIONS); do \
+		mkdir -p doc/man$$s/ ; \
+		mv doc/src/manual/*.$$s doc/man$$s/ ; \
+		gzip doc/man$$s/*.$$s ; \
+	done
+
+# NOTE(review): install-docs/install-asciidoc are defined inside this
+# else branch, so they only exist when doc/src/manual/*.asciidoc is
+# present — confirm this matches upstream erlang.mk before changing.
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+	for s in $(MAN_SECTIONS); do \
+		mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
+		install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+	done
+endif
+
+distclean:: distclean-asciidoc
+
+distclean-asciidoc:
+	$(gen_verbose) rm -rf doc/html/ doc/guide.pdf doc/man3/ doc/man7/
+
+# Copyright (c) 2014-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+	$(verbose) printf "%s\n" "" \
+		"Bootstrap targets:" \
+		"  bootstrap          Generate a skeleton of an OTP application" \
+		"  bootstrap-lib      Generate a skeleton of an OTP library" \
+		"  bootstrap-rel      Generate the files needed to build a release" \
+		"  new-app n=NAME     Create a new local OTP application NAME" \
+		"  new-lib n=NAME     Create a new local OTP library NAME" \
+		"  new t=TPL n=NAME   Generate a module NAME based on the template TPL" \
+		"  new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+		"  list-templates     List available templates"
+
+# Bootstrap templates.
+# These bodies are written verbatim by render_template; $p is set by the
+# bootstrap* targets to the project/application name.
+
+# .app.src skeleton for an application (with _app callback module).
+define bs_appsrc
+{application, $p, [
+	{description, ""},
+	{vsn, "0.1.0"},
+	{id, "git"},
+	{modules, []},
+	{registered, []},
+	{applications, [
+		kernel,
+		stdlib
+	]},
+	{mod, {$p_app, []}},
+	{env, []}
+]}.
+endef
+
+# .app.src skeleton for a library (no callback module, no env).
+define bs_appsrc_lib
+{application, $p, [
+	{description, ""},
+	{vsn, "0.1.0"},
+	{id, "git"},
+	{modules, []},
+	{registered, []},
+	{applications, [
+		kernel,
+		stdlib
+	]}
+]}.
+endef
+
+# Makefile skeleton; the SP variant records the requested indentation
+# width so future "new" invocations render with spaces.
+ifdef SP
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+
+include erlang.mk
+endef
+else
+define bs_Makefile
+PROJECT = $p
+include erlang.mk
+endef
+endif
+
+# Makefile skeleton for an application inside $(APPS_DIR), including the
+# top-level erlang.mk via a relative path.
+define bs_apps_Makefile
+PROJECT = $p
+include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
+endef
+
+# application behaviour callback module skeleton.
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+	$p_sup:start_link().
+
+stop(_State) ->
+	ok.
+endef
+
+# relx release configuration skeleton.
+define bs_relx_config
+{release, {$p_release, "1"}, [$p]}.
+{extended_start_script, true}.
+{sys_config, "rel/sys.config"}.
+{vm_args, "rel/vm.args"}.
+endef
+
+# Empty sys.config skeleton.
+define bs_sys_config
+[
+].
+endef
+
+# vm.args skeleton: node name, cookie, heartbeat monitoring.
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+# Each tpl_* define is a complete Erlang module skeleton rendered by
+# "make new t=<template> n=<name>"; $(n) is the module name.
+
+# supervisor behaviour skeleton (one_for_one, no children).
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+	supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+	Procs = [],
+	{ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+# gen_server behaviour skeleton with empty state record.
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+	gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+	{ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+	{reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+	{noreply, State}.
+
+handle_info(_Info, State) ->
+	{noreply, State}.
+
+terminate(_Reason, _State) ->
+	ok.
+
+code_change(_OldVsn, State, _Extra) ->
+	{ok, State}.
+endef
+
+# cowboy HTTP handler skeleton (replies 200 to every request).
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+	{ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+	{ok, Req2} = cowboy_req:reply(200, Req),
+	{ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+	ok.
+endef
+
+# gen_fsm behaviour skeleton with a single state_name state.
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+	gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+	{ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+	{next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+	{next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+	{reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+	{reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+	{next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+	ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+	{ok, StateName, StateData}.
+endef
+
+# cowboy long-lived (loop) handler skeleton with 5s timeout + hibernate.
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+	{loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+	{loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+	ok.
+endef
+
+# cowboy REST handler skeleton serving text/html.
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+	{upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+	{[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+	{<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+# cowboy websocket handler skeleton echoing text/binary frames.
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+	{upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+	Req2 = cowboy_req:compact(Req),
+	{ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+	{reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+	{reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+	{ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+	{ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+	ok.
+endef
+
+# ranch protocol skeleton: accepts the socket then enters an empty loop.
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+	socket :: inet:socket(),
+	transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+	Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+	{ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+	ok = ranch:accept_ack(Ref),
+	loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+	loop(State).
+endef
+
+# Plugin-specific targets.
+
+# Renders template variable $(1) into file $(2): escapes newlines, %,
+# and single quotes for printf, and replaces tabs with $(WS).
+define render_template
+	$(verbose) printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# WS is the whitespace substituted for template tabs: $(SP) spaces when
+# SP is set, a literal tab otherwise.
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+# Generate a full OTP application skeleton in the current directory.
+bootstrap:
+ifneq ($(wildcard src/),)
+	$(error Error: src/ directory already exists)
+endif
+	$(eval p := $(PROJECT))
+	$(eval n := $(PROJECT)_sup)
+	$(call render_template,bs_Makefile,Makefile)
+	$(verbose) mkdir src/
+ifdef LEGACY
+	$(call render_template,bs_appsrc,src/$(PROJECT).app.src)
+endif
+	$(call render_template,bs_app,src/$(PROJECT)_app.erl)
+	$(call render_template,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+# Generate an OTP library skeleton (no _app/_sup modules).
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+	$(error Error: src/ directory already exists)
+endif
+	$(eval p := $(PROJECT))
+	$(call render_template,bs_Makefile,Makefile)
+	$(verbose) mkdir src/
+ifdef LEGACY
+	$(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+# Generate relx.config plus rel/sys.config and rel/vm.args.
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+	$(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard rel/),)
+	$(error Error: rel/ directory already exists)
+endif
+	$(eval p := $(PROJECT))
+	$(call render_template,bs_relx_config,relx.config)
+	$(verbose) mkdir rel/
+	$(call render_template,bs_sys_config,rel/sys.config)
+	$(call render_template,bs_vm_args,rel/vm.args)
+
+# Create a new application named $(in) under $(APPS_DIR).
+new-app:
+ifndef in
+	$(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+	$(error Error: Application $in already exists)
+endif
+	$(eval p := $(in))
+	$(eval n := $(in)_sup)
+	$(verbose) mkdir -p $(APPS_DIR)/$p/src/
+	$(call render_template,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+	$(call render_template,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+	$(call render_template,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+	$(call render_template,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+# Create a new library named $(in) under $(APPS_DIR).
+new-lib:
+ifndef in
+	$(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+	$(error Error: Application $in already exists)
+endif
+	$(eval p := $(in))
+	$(verbose) mkdir -p $(APPS_DIR)/$p/src/
+	$(call render_template,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+	$(call render_template,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+# Render template t=TPL as src/$(n).erl, or delegate into app in=APP.
+new:
+ifeq ($(wildcard src/)$(in),)
+	$(error Error: src/ directory does not exist)
+endif
+ifndef t
+	$(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef tpl_$(t)
+	$(error Unknown template)
+endif
+ifndef n
+	$(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+	$(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new t=$t n=$n in=
+else
+	$(call render_template,tpl_$(t),src/$(n).erl)
+endif
+
+# List every tpl_* template known to this makefile.
+list-templates:
+	$(verbose) echo Available templates: $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+# Per-platform defaults; ?= lets the environment override all of them.
+
+ifeq ($(PLATFORM),darwin)
+	CC ?= cc
+	CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
+	CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
+	LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+	CC ?= cc
+	CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+	CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+	CC ?= gcc
+	CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+	CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+# NIFs/port drivers need position-independent code and the ERTS and
+# erl_interface headers/libraries.
+CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+
+LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C     " $(?F);
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP   " $(?F);
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD    " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+       $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+       $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+
+$(C_SRC_OUTPUT): $(OBJECTS)
+       $(verbose) mkdir -p priv/
+       $(link_verbose) $(CC) $(OBJECTS) \
+               $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+               -o $(C_SRC_OUTPUT)
+
+%.o: %.c
+       $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+       $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+       $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+       $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+       $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+$(C_SRC_ENV):
+       $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+               io_lib:format( \
+                       \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+                       \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+                       \"ERL_INTERFACE_LIB_DIR ?= ~s~n\", \
+                       [code:root_dir(), erlang:system_info(version), \
+                       code:lib_dir(erl_interface, include), \
+                       code:lib_dir(erl_interface, lib)])), \
+               halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+       $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+       /* Initialize private data. */
+       *priv_data = NULL;
+
+       loads++;
+
+       return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+       /* Convert the private data to the new version. */
+       *priv_data = *old_priv_data;
+
+       loads++;
+
+       return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+       if (loads == 1) {
+               /* Destroy the private data. */
+       }
+
+       loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+       if (enif_is_atom(env, argv[0])) {
+               return enif_make_tuple2(env,
+                       enif_make_atom(env, "hello"),
+                       argv[0]);
+       }
+
+       return enif_make_tuple2(env,
+               enif_make_atom(env, "error"),
+               enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+       {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+       PrivDir = case code:priv_dir(?MODULE) of
+               {error, _} ->
+                       AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+                       filename:join(AppPath, "priv");
+               Path ->
+                       Path
+       end,
+       erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+       erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+       $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+       $(error Error: src/$n.erl already exists)
+endif
+ifdef in
+       $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+       $(verbose) mkdir -p $(C_SRC_DIR) src/
+       $(call render_template,bs_c_nif,$(C_SRC_DIR)/$n.c)
+       $(call render_template,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup distclean-kerl
+
+KERL ?= $(CURDIR)/kerl
+export KERL
+
+KERL_URL ?= https://raw.githubusercontent.com/yrashk/kerl/master/kerl
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+CI_INSTALL_DIR ?= $(HOME)/erlang
+CI_OTP ?=
+
+ifeq ($(strip $(CI_OTP)),)
+ci::
+else
+ci:: $(addprefix ci-,$(CI_OTP))
+
+ci-prepare: $(addprefix $(CI_INSTALL_DIR)/,$(CI_OTP))
+
+ci-setup::
+
+ci_verbose_0 = @echo " CI    " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$(1): $(CI_INSTALL_DIR)/$(1)
+       $(ci_verbose) \
+               PATH="$(CI_INSTALL_DIR)/$(1)/bin:$(PATH)" \
+               CI_OTP_RELEASE="$(1)" \
+               CT_OPTS="-label $(1)" \
+               $(MAKE) clean ci-setup tests
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp))))
+
+define ci_otp_target
+ifeq ($(wildcard $(CI_INSTALL_DIR)/$(1)),)
+$(CI_INSTALL_DIR)/$(1): $(KERL)
+       $(KERL) build git $(OTP_GIT) $(1) $(1)
+       $(KERL) install $(1) $(CI_INSTALL_DIR)/$(1)
+endif
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_otp_target,$(otp))))
+
+$(KERL):
+       $(gen_verbose) $(call core_http_get,$(KERL),$(KERL_URL))
+       $(verbose) chmod +x $(KERL)
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Continuous Integration targets:" \
+               "  ci          Run '$(MAKE) tests' on all configured Erlang versions." \
+               "" \
+               "The CI_OTP variable must be defined with the Erlang versions" \
+               "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+       $(gen_verbose) rm -rf $(KERL)
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+ifneq ($(wildcard $(TEST_DIR)),)
+       CT_SUITES ?= $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+else
+       CT_SUITES ?=
+endif
+
+# Core targets.
+
+tests:: ct
+
+distclean:: distclean-ct
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Common_test targets:" \
+               "  ct          Run all the common_test suites for this project" \
+               "" \
+               "All your common_test suites have their associated targets." \
+               "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+       -no_auto_compile \
+       -noinput \
+       -pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(TEST_DIR) \
+       -dir $(TEST_DIR) \
+       -logdir $(CURDIR)/logs
+
+ifeq ($(CT_SUITES),)
+ct:
+else
+ct: test-build
+       $(verbose) mkdir -p $(CURDIR)/logs/
+       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+       $(verbose) mkdir -p $(CURDIR)/logs/
+       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+       $(gen_verbose) rm -rf $(CURDIR)/logs/
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r src
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
+       -Wunmatched_returns # -Wunderspecs
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Dialyzer targets:" \
+               "  plt         Build a PLT file for this project" \
+               "  dialyze     Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+$(DIALYZER_PLT): deps app
+       $(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+       $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze:
+else
+dialyze: $(DIALYZER_PLT)
+endif
+       $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+
+# Core targets.
+
+docs:: distclean-edoc edoc
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: doc-deps
+       $(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().'
+
+distclean-edoc:
+       $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info
+
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: elvis distclean-elvis
+
+# Configuration.
+
+ELVIS_CONFIG ?= $(CURDIR)/elvis.config
+
+ELVIS ?= $(CURDIR)/elvis
+export ELVIS
+
+ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
+ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
+ELVIS_OPTS ?=
+
+# Core targets.
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Elvis targets:" \
+               "  elvis       Run Elvis using the local elvis.config or download the default otherwise"
+
+distclean:: distclean-elvis
+
+# Plugin-specific targets.
+
+$(ELVIS):
+       $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
+       $(verbose) chmod +x $(ELVIS)
+
+$(ELVIS_CONFIG):
+       $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
+
+elvis: $(ELVIS) $(ELVIS_CONFIG)
+       $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
+
+distclean-elvis:
+       $(gen_verbose) rm -rf $(ELVIS)
+
+# Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+
+ESCRIPT_BEAMS ?= "ebin/*", "deps/*/ebin/*"
+ESCRIPT_SYS_CONFIG ?= "rel/sys.config"
+ESCRIPT_EMU_ARGS ?= -pa . \
+       -sasl errlog_type error \
+       -escript main $(ESCRIPT_NAME)
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_STATIC ?= "deps/*/priv/**", "priv/**"
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Escript targets:" \
+               "  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+# Based on https://github.com/synrc/mad/blob/master/src/mad_bundle.erl
+# Copyright (c) 2013 Maxim Sokhatsky, Synrc Research Center
+# Modified MIT License, https://github.com/synrc/mad/blob/master/LICENSE :
+# Software may only be used for the great good and the true happiness of all
+# sentient beings.
+
+define ESCRIPT_RAW
+'Read = fun(F) -> {ok, B} = file:read_file(filename:absname(F)), B end,'\
+'Files = fun(L) -> A = lists:concat([filelib:wildcard(X)||X<- L ]),'\
+'  [F || F <- A, not filelib:is_dir(F) ] end,'\
+'Squash = fun(L) -> [{filename:basename(F), Read(F) } || F <- L ] end,'\
+'Zip = fun(A, L) -> {ok,{_,Z}} = zip:create(A, L, [{compress,all},memory]), Z end,'\
+'Ez = fun(Escript) ->'\
+'  Static = Files([$(ESCRIPT_STATIC)]),'\
+'  Beams = Squash(Files([$(ESCRIPT_BEAMS), $(ESCRIPT_SYS_CONFIG)])),'\
+'  Archive = Beams ++ [{ "static.gz", Zip("static.gz", Static)}],'\
+'  escript:create(Escript, [ $(ESCRIPT_OPTIONS)'\
+'    {archive, Archive, [memory]},'\
+'    {shebang, "$(ESCRIPT_SHEBANG)"},'\
+'    {comment, "$(ESCRIPT_COMMENT)"},'\
+'    {emu_args, " $(ESCRIPT_EMU_ARGS)"}'\
+'  ]),'\
+'  file:change_mode(Escript, 8#755)'\
+'end,'\
+'Ez("$(ESCRIPT_NAME)"),'\
+'halt().'
+endef
+
+ESCRIPT_COMMAND = $(subst ' ',,$(ESCRIPT_RAW))
+
+escript:: distclean-escript deps app
+       $(gen_verbose) $(ERL) -eval $(ESCRIPT_COMMAND)
+
+distclean-escript:
+       $(gen_verbose) rm -f $(ESCRIPT_NAME)
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel distclean-relx-rel distclean-relx run
+
+# Configuration.
+
+RELX ?= $(CURDIR)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://github.com/erlware/relx/releases/download/v3.5.0/relx
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+       RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+       RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+endif
+endif
+
+distclean:: distclean-relx-rel distclean-relx
+
+# Plugin-specific targets.
+
+$(RELX):
+       $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+       $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+       $(verbose) $(RELX) -c $(RELX_CONFIG) $(RELX_OPTS)
+
+distclean-relx-rel:
+       $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+distclean-relx:
+       $(gen_verbose) rm -rf $(RELX)
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run:
+else
+
+define get_relx_release.erl
+       {ok, Config} = file:consult("$(RELX_CONFIG)"),
+       {release, {Name, _}, _} = lists:keyfind(release, 1, Config),
+       io:format("~s", [Name]),
+       halt(0).
+endef
+
+RELX_RELEASE = `$(call erlang,$(get_relx_release.erl))`
+
+run: all
+       $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_RELEASE)/bin/$(RELX_RELEASE) console
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Relx targets:" \
+               "  run         Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(APPS_DIR)/*/ebin $(DEPS_DIR)/*/ebin
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Shell targets:" \
+               "  shell       Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+       $(verbose) for dep in $(ALL_SHELL_DEPS_DIRS) ; do $(MAKE) -C $$dep ; done
+
+shell: build-shell-deps
+       $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+       code:add_pathsa(["$(CURDIR)/ebin", "$(DEPS_DIR)/*/ebin"]),
+       try
+               case $(1) of
+                       all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+                       module -> triq:check($(2));
+                       function -> triq:check($(2))
+               end
+       of
+               true -> halt(0);
+               _ -> halt(1)
+       catch error:undef ->
+               io:format("Undefined property or module~n"),
+               halt(0)
+       end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build
+       $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build
+       $(verbose) echo Testing $(t)/0
+       $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build
+       $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename $(wildcard ebin/*.beam))))))
+       $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+       XREF_ARGS :=
+else
+       XREF_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/0.2.2/xrefr
+
+# Core targets.
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Xref targets:" \
+               "  xref        Run Xrefr using $$XREF_CONFIG as config file if defined"
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+       $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+       $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+       $(gen_verbose) $(XREFR) $(XREF_ARGS)
+
+distclean-xref:
+       $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR = cover
+
+# Hook in coverage to ct
+
+ifdef COVER
+ifdef CT_RUN
+# All modules in 'ebin'
+COVER_MODS = $(notdir $(basename $(call core_ls,ebin/*.beam)))
+
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec:
+       $(verbose) echo Cover mods: $(COVER_MODS)
+       $(gen_verbose) printf "%s\n" \
+               '{incl_mods,[$(subst $(space),$(comma),$(COVER_MODS))]}.' \
+               '{export,"$(CURDIR)/ct.coverdata"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+       $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Cover targets:" \
+               "  cover-report  Generate a HTML coverage report from previously collected" \
+               "                cover data." \
+               "  all.coverdata Merge {eunit,ct}.coverdata into one coverdata file." \
+               "" \
+               "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+               "target tests additionally generates a HTML coverage report from the combined" \
+               "coverdata files from each of these testing tools. HTML reports can be disabled" \
+               "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out all.coverdata,$(wildcard *.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+       $(gen_verbose) rm -f *.coverdata ct.cover.spec
+
+# Merge all coverdata files into one.
+all.coverdata: $(COVERDATA)
+       $(gen_verbose) $(ERL) -eval ' \
+               $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),) \
+               cover:export("$@"), halt(0).'
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+       $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+       grep -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+       | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+       $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+       Ms = cover:imported_modules(),
+       [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+               ++ ".COVER.html", [html])  || M <- Ms],
+       Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+       EunitHrlMods = [$(EUNIT_HRL_MODS)],
+       Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+               true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+       TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+       TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+       TotalPerc = round(100 * TotalY / (TotalY + TotalN)),
+       {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+       io:format(F, "<!DOCTYPE html><html>~n"
+               "<head><meta charset=\"UTF-8\">~n"
+               "<title>Coverage report</title></head>~n"
+               "<body>~n", []),
+       io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+       io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+       [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+               "<td>~p%</td></tr>~n",
+               [M, M, round(100 * Y / (Y + N))]) || {M, {Y, N}} <- Report1],
+       How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+       Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+       io:format(F, "</table>~n"
+               "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+               "</body></html>", [How, Date]),
+       halt().
+endef
+
+cover-report:
+       $(gen_verbose) mkdir -p $(COVER_REPORT_DIR)
+       $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+       fetch-shell-deps
+
+ifneq ($(SKIP_DEPS),)
+fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps fetch-shell-deps:
+       @:
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+fetch-deps: $(ALL_DEPS_DIRS)
+fetch-doc-deps: $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+fetch-rel-deps: $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+fetch-test-deps: $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+fetch-shell-deps: $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+fetch-deps: $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+fetch-deps: $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+fetch-deps: $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+fetch-deps: $(ALL_SHELL_DEPS_DIRS)
+endif
+
+fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps fetch-shell-deps:
+ifndef IS_APP
+       $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+               $(MAKE) -C $$dep $@ IS_APP=1 || exit $$?; \
+       done
+endif
+ifneq ($(IS_DEP),1)
+       $(verbose) rm -f $(ERLANG_MK_TMP)/$@.log
+endif
+       $(verbose) mkdir -p $(ERLANG_MK_TMP)
+       $(verbose) for dep in $^ ; do \
+               if ! grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/$@.log; then \
+                       echo $$dep >> $(ERLANG_MK_TMP)/$@.log; \
+                       if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk)$$" \
+                        $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+                               $(MAKE) -C $$dep fetch-deps IS_DEP=1 || exit $$?; \
+                       fi \
+               fi \
+       done
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+       list-shell-deps
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+       $(verbose) :> $@
+else
+LIST_DIRS = $(ALL_DEPS_DIRS)
+LIST_DEPS = $(BUILD_DEPS) $(DEPS)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): fetch-deps
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): LIST_DIRS += $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): LIST_DEPS += $(DOC_DEPS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): fetch-doc-deps
+else
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): LIST_DIRS += $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): LIST_DEPS += $(REL_DEPS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): fetch-rel-deps
+else
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): LIST_DIRS += $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): LIST_DEPS += $(TEST_DEPS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): fetch-test-deps
+else
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): LIST_DIRS += $(ALL_SHELL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): LIST_DEPS += $(SHELL_DEPS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): fetch-shell-deps
+else
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): fetch-deps
+endif
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ifneq ($(IS_DEP),1)
+       $(verbose) rm -f $@.orig
+endif
+ifndef IS_APP
+       $(verbose) for app in $(filter-out $(CURDIR),$(ALL_APPS_DIRS)); do \
+               $(MAKE) -C "$$app" --no-print-directory $@ IS_APP=1 || :; \
+       done
+endif
+       $(verbose) for dep in $(filter-out $(CURDIR),$(LIST_DIRS)); do \
+               if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk)$$" \
+                $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+                       $(MAKE) -C "$$dep" --no-print-directory $@ IS_DEP=1; \
+               fi; \
+       done
+       $(verbose) for dep in $(LIST_DEPS); do \
+               echo $(DEPS_DIR)/$$dep; \
+       done >> $@.orig
+ifndef IS_APP
+ifneq ($(IS_DEP),1)
+       $(verbose) sort < $@.orig | uniq > $@
+       $(verbose) rm -f $@.orig
+endif
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+ifneq ($(SKIP_DEPS),)
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+       @:
+else
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(IS_DEP),1)
+ifneq ($(filter doc,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+endif
+endif
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+       $(verbose) cat $^ | sort | uniq
+endif # ifneq ($(SKIP_DEPS),)
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/include/rabbit_jms_topic_exchange.hrl
new file mode 100644 (file)
index 0000000..265bce9
--- /dev/null
@@ -0,0 +1,79 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2012, 2013 Pivotal Software, Inc.  All rights reserved.
+%% -----------------------------------------------------------------------------
+
+%% JMS on Rabbit Topic Selector Exchange plugin definitions
+
+%% -----------------------------------------------------------------------------
+%% User-defined exchange type name
+-define(X_TYPE_NAME, <<"x-jms-topic">>).
+
+%% -----------------------------------------------------------------------------
+%% mnesia database records
+-define(JMS_TOPIC_TABLE, x_jms_topic_table).
+-define(JMS_TOPIC_RECORD, x_jms_topic_xs).
+
+%% Key is x_name -- the exchange name
+-record(?JMS_TOPIC_RECORD, {x_name, x_selection_policy = undefined, x_selector_funs}).
+%% fields:
+%%  x_selector_funs
+%%      a partial map (`dict`) of binding functions:
+%%          dict: RoutingKey x DestName -/-> BindingSelectorFun
+%%      (there is no default, but an empty map will be initially inserted)
+%%      where a BindingSelectorFun has the signature:
+%%          bsf : Headers -> boolean
+%%  x_selection_policy
+%%      not used, retained for backwards compatibility of db records.
+%% -----------------------------------------------------------------------------
+
+%% -----------------------------------------------------------------------------
+%% Name of arg on exchange creation and bindings. Used to supply client version
+%% for plugin check.
+%%      private static final String RJMS_VERSION_ARG = "rjms_version";
+%% in JMS Client.
+%% If absent, client version assumed to be < 1.2.0.
+-define(RJMS_VERSION_ARG, <<"rjms_version">>).
+%% -----------------------------------------------------------------------------
+
+%% -----------------------------------------------------------------------------
+%% Name of arg on binding used to specify erlang term -- string type
+%%      private static final String RJMS_COMPILED_SELECTOR_ARG = "rjms_erlang_selector";
+%% in JMS Client.
+-define(RJMS_COMPILED_SELECTOR_ARG, <<"rjms_erlang_selector">>).
+%% -----------------------------------------------------------------------------
+
+%% -----------------------------------------------------------------------------
+%% List of versions compatible with this level of topic exchange.
+-define(RJMS_COMPATIBLE_VERSIONS, [ "1.4.7"             % current build release
+                                 %, "1.4.5"             % release omitted
+                                  , "1.4.4"
+                                 %, "1.4.3"             % release omitted
+                                 %, "1.4.2"             % release omitted
+                                  , "1.4.1"
+                                  , "1.3.4"
+                                  , "1.3.3"
+                                  , "1.3.2"
+                                  , "1.3.1"
+                                 %, "1.3.0"             % release aborted
+                                  , "1.2.5"
+                                  , "1.2.4"
+                                  , "1.2.3"
+                                 %, "1.2.2"             % release omitted
+                                  , "1.2.1"
+                                  , "1.2.0"
+                                  , ""                  % development use only
+                                  , "0.0.0"             % development use only
+                                  ]).
+%% -----------------------------------------------------------------------------
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/rabbitmq-components.mk b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/rabbitmq-components.mk
new file mode 100644 (file)
index 0000000..eb9e9e3
--- /dev/null
@@ -0,0 +1,345 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# Automatically add rabbitmq-common to the dependencies, at least for
+# the Makefiles.
+ifneq ($(PROJECT),rabbit_common)
+ifneq ($(PROJECT),rabbitmq_public_umbrella)
+ifeq ($(filter rabbit_common,$(DEPS)),)
+DEPS += rabbit_common
+endif
+endif
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+
+dep_amqp_client                       = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit                            = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common                     = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0                  = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp        = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http        = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap        = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl       = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser    = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_clusterer                = git_rmq rabbitmq-clusterer $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen                  = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client            = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange      = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes        = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management        = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
+dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella          = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# FIXME: As of 2015-11-20, we depend on Ranch 1.2.1, but erlang.mk
+# defaults to Ranch 1.1.0. All projects depending indirectly on Ranch
+# need to add "ranch" as a BUILD_DEPS. The list of projects needing
+# this workaround are:
+#     o  rabbitmq-web-stomp
+dep_ranch = git https://github.com/ninenines/ranch 1.2.1
+
+RABBITMQ_COMPONENTS = amqp_client \
+                     rabbit \
+                     rabbit_common \
+                     rabbitmq_amqp1_0 \
+                     rabbitmq_auth_backend_amqp \
+                     rabbitmq_auth_backend_http \
+                     rabbitmq_auth_backend_ldap \
+                     rabbitmq_auth_mechanism_ssl \
+                     rabbitmq_boot_steps_visualiser \
+                     rabbitmq_clusterer \
+                     rabbitmq_codegen \
+                     rabbitmq_consistent_hash_exchange \
+                     rabbitmq_delayed_message_exchange \
+                     rabbitmq_dotnet_client \
+                     rabbitmq_event_exchange \
+                     rabbitmq_federation \
+                     rabbitmq_federation_management \
+                     rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
+                     rabbitmq_lvc \
+                     rabbitmq_management \
+                     rabbitmq_management_agent \
+                     rabbitmq_management_exchange \
+                     rabbitmq_management_themes \
+                     rabbitmq_management_visualiser \
+                     rabbitmq_message_timestamp \
+                     rabbitmq_metronome \
+                     rabbitmq_mqtt \
+                     rabbitmq_objc_client \
+                     rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
+                     rabbitmq_rtopic_exchange \
+                     rabbitmq_sharding \
+                     rabbitmq_shovel \
+                     rabbitmq_shovel_management \
+                     rabbitmq_stomp \
+                     rabbitmq_test \
+                     rabbitmq_toke \
+                     rabbitmq_top \
+                     rabbitmq_tracing \
+                     rabbitmq_trust_store \
+                     rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
+                     rabbitmq_web_stomp \
+                     rabbitmq_web_stomp_examples \
+                     rabbitmq_website
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+       ref=$$(git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+       if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+base_rmq_ref := $(shell \
+       (git rev-parse --verify -q stable >/dev/null && \
+         git merge-base --is-ancestor $$(git merge-base master HEAD) stable && \
+         echo stable) || \
+       echo master)
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+#   - We take the "origin" remote URL as the base
+#   - The current project name and repository name are replaced by the
+#   target's properties:
+#       eg. rabbitmq-common is replaced by rabbitmq-codegen
+#       eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+#   1. /foo.git -> /bar.git
+#   2. /foo     -> /bar
+#   3. /foo/    -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)),                                    \
+                 $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))),       \
+                 $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+       fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+       fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+       if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+        git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+           fetch_url="$$$$fetch_url1"; \
+           push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+       elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+           fetch_url="$$$$fetch_url2"; \
+           push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+       fi; \
+       cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+       $(foreach ref,$(call dep_rmq_commits,$(1)), \
+         git checkout -q $(ref) >/dev/null 2>&1 || \
+         ) \
+       (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+         1>&2 && false) ) && \
+       (test "$$$$fetch_url" = "$$$$push_url" || \
+        git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+       @:
+
+prepare-dist::
+       @:
+
+# --------------------------------------------------------------------
+# Run a RabbitMQ node (moved from rabbitmq-run.mk as a workaround).
+# --------------------------------------------------------------------
+
+# Add "rabbit" to the build dependencies when the user wants to start
+# a broker or to the test dependencies when the user wants to test a
+# project.
+#
+# NOTE: This should belong to rabbitmq-run.mk. Unfortunately, it is
+# loaded *after* erlang.mk which is too late to add a dependency. That's
+# why rabbitmq-components.mk knows the list of targets which start a
+# broker and add "rabbit" to the dependencies in this case.
+
+ifneq ($(PROJECT),rabbit)
+ifeq ($(filter rabbit,$(DEPS) $(BUILD_DEPS)),)
+RUN_RMQ_TARGETS = run-broker \
+                 run-background-broker \
+                 run-node \
+                 run-background-node \
+                 start-background-node
+
+ifneq ($(filter $(RUN_RMQ_TARGETS),$(MAKECMDGOALS)),)
+BUILD_DEPS += rabbit
+endif
+endif
+
+ifeq ($(filter rabbit,$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)),)
+ifneq ($(filter check tests tests-with-broker test,$(MAKECMDGOALS)),)
+TEST_DEPS += rabbit
+endif
+endif
+endif
+
+ifeq ($(filter rabbit_public_umbrella amqp_client rabbit_common rabbitmq_test,$(PROJECT)),)
+ifeq ($(filter rabbitmq_test,$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)),)
+TEST_DEPS += rabbitmq_test
+endif
+endif
+
+# --------------------------------------------------------------------
+# rabbitmq-components.mk checks.
+# --------------------------------------------------------------------
+
+ifeq ($(PROJECT),rabbit_common)
+else ifdef SKIP_RMQCOMP_CHECK
+else ifeq ($(IS_DEP),1)
+else ifneq ($(filter co up,$(MAKECMDGOALS)),)
+else
+# In all other cases, rabbitmq-components.mk must be in sync.
+deps:: check-rabbitmq-components.mk
+fetch-deps: check-rabbitmq-components.mk
+endif
+
+# If this project is under the Umbrella project, we override $(DEPS_DIR)
+# to point to the Umbrella's one. We also disable `make distclean` so
+# $(DEPS_DIR) is not accidentally removed.
+
+ifneq ($(wildcard ../../UMBRELLA.md),)
+UNDER_UMBRELLA = 1
+else ifneq ($(wildcard UMBRELLA.md),)
+UNDER_UMBRELLA = 1
+endif
+
+ifeq ($(UNDER_UMBRELLA),1)
+ifneq ($(PROJECT),rabbitmq_public_umbrella)
+DEPS_DIR ?= $(abspath ..)
+
+distclean:: distclean-components
+       @:
+
+distclean-components:
+endif
+
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
+
+UPSTREAM_RMQ_COMPONENTS_MK = $(DEPS_DIR)/rabbit_common/mk/rabbitmq-components.mk
+
+check-rabbitmq-components.mk:
+       $(verbose) cmp -s rabbitmq-components.mk \
+               $(UPSTREAM_RMQ_COMPONENTS_MK) || \
+               (echo "error: rabbitmq-components.mk must be updated!" 1>&2; \
+                 false)
+
+ifeq ($(PROJECT),rabbit_common)
+rabbitmq-components-mk:
+       @:
+else
+rabbitmq-components-mk:
+       $(gen_verbose) cp -a $(UPSTREAM_RMQ_COMPONENTS_MK) .
+ifeq ($(DO_COMMIT),yes)
+       $(verbose) git diff --quiet rabbitmq-components.mk \
+       || git commit -m 'Update rabbitmq-components.mk' rabbitmq-components.mk
+endif
+endif
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/src/rabbit_jms_topic_exchange.erl
new file mode 100644 (file)
index 0000000..4bf639e
--- /dev/null
@@ -0,0 +1,317 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2012, 2013 Pivotal Software, Inc.  All rights reserved.
+%% -----------------------------------------------------------------------------
+
+%% JMS on Rabbit Selector Exchange plugin
+
+%% -----------------------------------------------------------------------------
+-module(rabbit_jms_topic_exchange).
+
+-behaviour(rabbit_exchange_type).
+
+-include("rabbit_jms_topic_exchange.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%% Rabbit exchange type functions:
+-export([ description/0
+        , serialise_events/0
+        , route/2
+        , validate/1
+        , create/2
+        , delete/3
+        , validate_binding/2
+        , add_binding/3
+        , remove_bindings/3
+        , assert_args_equivalence/2
+        , policy_changed/2 ]).
+
+%% Initialisation of database function:
+-export([setup_db_schema/0]).
+
+%%----------------------------------------------------------------------------
+
+%% Register exchange type
+-rabbit_boot_step({ ?MODULE
+                  , [ {description, "exchange type JMS topic selector"}
+                    , {mfa, {rabbit_registry, register, [exchange, ?X_TYPE_NAME, ?MODULE]}}
+                    , {cleanup, {rabbit_registry, unregister, [exchange, ?X_TYPE_NAME]}}
+                    , {requires, rabbit_registry}
+                    , {enables, kernel_ready} ] }).
+
+%% Initialise database
+-rabbit_boot_step({ rabbit_jms_topic_exchange_mnesia
+                  , [ {description, "database exchange type JMS topic selector"}
+                    , {mfa, {?MODULE, setup_db_schema, []}}
+                    , {requires, database}
+                    , {enables, external_infrastructure} ] }).
+
+%%----------------------------------------------------------------------------
+
+% Initialise database table for all exchanges of type <<"x-jms-topic">>
+setup_db_schema() ->
+  case mnesia:create_table( ?JMS_TOPIC_TABLE
+                          , [ {attributes, record_info(fields, ?JMS_TOPIC_RECORD)}
+                            , {record_name, ?JMS_TOPIC_RECORD}
+                            , {type, set} ]
+                          ) of
+    {atomic, ok} -> ok;
+    {aborted, {already_exists, ?JMS_TOPIC_TABLE}} -> ok
+  end.
+
+%%----------------------------------------------------------------------------
+%% R E F E R E N C E   T Y P E   I N F O R M A T I O N
+
+%% -type(binding() ::
+%%         #binding{source      :: rabbit_exchange:name(),
+%%                  destination :: binding_destination(),
+%%                  key         :: rabbit_binding:key(),
+%%                  args        :: rabbit_framing:amqp_table()}).
+%%
+%% -type(exchange() ::
+%%         #exchange{name        :: rabbit_exchange:name(),
+%%                   type        :: rabbit_exchange:type(),
+%%                   durable     :: boolean(),
+%%                   auto_delete :: boolean(),
+%%                   arguments   :: rabbit_framing:amqp_table()}).
+%%
+%% -type(amqp_field_type() ::
+%%       'longstr' | 'signedint' | 'decimal' | 'timestamp' |
+%%       'table' | 'byte' | 'double' | 'float' | 'long' |
+%%       'short' | 'bool' | 'binary' | 'void' | 'array').
+
+%%----------------------------------------------------------------------------
+%% E X P O R T E D   E X C H A N G E   B E H A V I O U R
+
+% Exchange description
+description() -> [ {name, <<"jms-selector">>}
+                 , {description, <<"JMS selector exchange">>} ].
+
+% Binding event serialisation
+serialise_events() -> false.
+
+% Route messages
+route( #exchange{name = XName}
+     , #delivery{message = #basic_message{content = MessageContent, routing_keys = RKs}}
+     ) ->
+  BindingFuns = get_binding_funs_x(XName),
+  match_bindings(XName, RKs, MessageContent, BindingFuns).
+
+
+% Before exchange declaration
+validate(_X) -> ok.
+
+% After exchange declaration and recovery
+create(transaction, #exchange{name = XName, arguments = XArgs}) ->
+  check_version_arg(XName, XArgs),
+  add_initial_record(XName);
+create(_Tx, _X) ->
+  ok.
+
+% Delete an exchange
+delete(transaction, #exchange{name = XName}, _Bs) ->
+  delete_state(XName),
+  ok;
+delete(_Tx, _X, _Bs) ->
+  ok.
+
+% Before add binding
+validate_binding(_X, _B) -> ok.
+
+% A new binding has been added or recovered
+add_binding( Tx
+           , #exchange{name = XName}
+           , #binding{key = BindingKey, destination = Dest, args = Args}
+           ) ->
+  check_version_arg(XName, Args),
+  Selector = get_string_arg(Args, ?RJMS_COMPILED_SELECTOR_ARG),
+  BindGen = generate_binding_fun(Selector),
+  case {Tx, BindGen} of
+    {transaction, {ok, BindFun}} ->
+      add_binding_fun(XName, {{BindingKey, Dest}, BindFun});
+    {none, {error, _}} ->
+      parsing_error(XName, Selector, Dest);
+    _ ->
+      ok
+  end,
+  ok.
+
+% Binding removal
+remove_bindings( transaction
+               , #exchange{name = XName}
+               , Bindings
+               ) ->
+  remove_binding_funs(XName, Bindings),
+  ok;
+remove_bindings(_Tx, _X, _Bs) ->
+  ok.
+
+% Exchange argument equivalence
+assert_args_equivalence(X, Args) ->
+  rabbit_exchange:assert_args_equivalence(X, Args).
+
+% Policy change notifications ignored
+policy_changed(_X1, _X2) -> ok.
+
+%%----------------------------------------------------------------------------
+%% P R I V A T E   F U N C T I O N S
+
+% Check version argument, if supplied
+check_version_arg(XName, Args) ->
+  Version = get_string_arg(Args, ?RJMS_VERSION_ARG, "pre-1.2.0"),
+  case lists:member(Version, ?RJMS_COMPATIBLE_VERSIONS) of
+    true  -> ok;
+    false -> client_version_error(XName, Version)
+  end.
+
+% Get a string argument from the args or arguments parameters
+get_string_arg(Args, ArgName) -> get_string_arg(Args, ArgName, error).
+
+get_string_arg(Args, ArgName, Default) ->
+  case rabbit_misc:table_lookup(Args, ArgName) of
+    {longstr, BinVal} -> binary_to_list(BinVal);
+    _ -> Default
+  end.
+
+% Match bindings for the current message
+match_bindings( XName, _RoutingKeys, MessageContent, BindingFuns) ->
+  MessageHeaders = get_headers(MessageContent),
+  rabbit_router:match_bindings( XName
+                              , fun(#binding{key = Key, destination = Dest}) ->
+                                  binding_fun_match({Key, Dest}, MessageHeaders, BindingFuns)
+                                end
+                              ).
+
+% Select binding function from Funs dictionary, apply it to Headers and return result (true|false)
+binding_fun_match(DictKey, Headers, FunsDict) ->
+  case dict:find(DictKey, FunsDict) of
+    {ok, Fun} when is_function(Fun, 1) -> Fun(Headers);
+    error                              -> false          % do not match if no function found
+  end.
+
+% get Headers from message content
+get_headers(Content) ->
+  case (Content#content.properties)#'P_basic'.headers of
+    undefined -> [];
+    H         -> rabbit_misc:sort_field_table(H)
+  end.
+
+% generate the function that checks the message against the selector
+generate_binding_fun(ERL) ->
+  case decode_term(ERL) of
+    {error, _}    -> error;
+    {ok, ErlTerm} -> check_fun(ErlTerm)
+  end.
+
+% build checking function from compiled expression
+check_fun(CompiledExp) ->
+  { ok,
+    fun(Headers) ->
+      selector_match(CompiledExp, Headers)
+    end
+  }.
+
+% get an erlang term from a string
+decode_term(Str) ->
+  try
+    {ok, Ts, _} = erl_scan:string(Str),
+    {ok, Term} = erl_parse:parse_term(Ts),
+    {ok, Term}
+  catch
+    Err -> {error, {invalid_erlang_term, Err}}
+  end.
+
+% Evaluate the selector and check against the Headers
+selector_match(Selector, Headers) ->
+  case sjx_evaluator:evaluate(Selector, Headers) of
+    true -> true;
+    _    -> false
+  end.
+
+% get binding funs from state (using dirty_reads)
+get_binding_funs_x(XName) ->
+  mnesia:async_dirty(
+    fun() ->
+      #?JMS_TOPIC_RECORD{x_selector_funs = BindingFuns} = read_state(XName),
+      BindingFuns
+    end,
+    []
+  ).
+
+add_initial_record(XName) ->
+  write_state_fun(XName, dict:new()).
+
+% add binding fun to binding fun dictionary
+add_binding_fun(XName, BindingKeyAndFun) ->
+  #?JMS_TOPIC_RECORD{x_selector_funs = BindingFuns} = read_state_for_update(XName),
+  write_state_fun(XName, put_item(BindingFuns, BindingKeyAndFun)).
+
+% remove binding funs from binding fun dictionary
+remove_binding_funs(XName, Bindings) ->
+  BindingKeys = [ {BindingKey, DestName} || #binding{key = BindingKey, destination = DestName} <- Bindings ],
+  #?JMS_TOPIC_RECORD{x_selector_funs = BindingFuns} = read_state_for_update(XName),
+  write_state_fun(XName, remove_items(BindingFuns, BindingKeys)).
+
+% add an item to the dictionary of binding functions
+put_item(Dict, {Key, Item}) -> dict:store(Key, Item, Dict).
+
+% remove a list of keyed items from the dictionary, by key
+remove_items(Dict, []) -> Dict;
+remove_items(Dict, [Key | Keys]) -> remove_items(dict:erase(Key, Dict), Keys).
+
+% delete all the state saved for this exchange
+delete_state(XName) ->
+  mnesia:delete(?JMS_TOPIC_TABLE, XName, write).
+
+% Basic read for update
+read_state_for_update(XName) -> read_state(XName, write).
+
+% Basic read
+read_state(XName) -> read_state(XName, read).
+
+% Lockable read
+read_state(XName, Lock) ->
+  case mnesia:read(?JMS_TOPIC_TABLE, XName, Lock) of
+    [Rec] -> Rec;
+    _     -> exchange_state_corrupt_error(XName)
+  end.
+
+% Basic write
+write_state_fun(XName, BFuns) ->
+  mnesia:write( ?JMS_TOPIC_TABLE
+              , #?JMS_TOPIC_RECORD{x_name = XName, x_selector_funs = BFuns}
+              , write ).
+
+%%----------------------------------------------------------------------------
+%% E R R O R S
+
+% state error
+exchange_state_corrupt_error(#resource{name = XName}) ->
+  rabbit_misc:protocol_error( internal_error
+                            , "exchange named '~s' has no saved state or incorrect saved state"
+                            , [XName] ).
+
+% version error
+client_version_error(#resource{name = XName}, Version) ->
+  rabbit_misc:protocol_error( internal_error
+                            , "client version '~s' incompatible with plugin for operation on exchange named '~s'"
+                            , [Version, XName] ).
+
+% parsing error
+parsing_error(#resource{name = XName}, S, #resource{name = DestName}) ->
+  rabbit_misc:protocol_error( precondition_failed
+                            , "cannot parse selector '~p' binding destination '~s' to exchange '~s'"
+                            , [S, DestName, XName] ).
+
+%%----------------------------------------------------------------------------
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/src/rabbitmq_jms_topic_exchange.app.src b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/src/rabbitmq_jms_topic_exchange.app.src
new file mode 100644 (file)
index 0000000..dc99ab3
--- /dev/null
@@ -0,0 +1,8 @@
+{ application, rabbitmq_jms_topic_exchange
+, [ {description, "RabbitMQ JMS topic selector exchange plugin"}
+  , {vsn, "3.6.5"}
+  , {modules, []}
+  , {registered, []}
+  , {applications, [kernel, stdlib, rabbit, mnesia]}
+  ]
+}.
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/src/sjx_evaluator.erl
new file mode 100644 (file)
index 0000000..e41d079
--- /dev/null
@@ -0,0 +1,178 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2012, 2013 Pivotal Software, Inc.  All rights reserved.
+%% -----------------------------------------------------------------------------
+%% Derived from works which were:
+%% Copyright (c) 2002, 2012 Tim Watson (watson.timothy@gmail.com)
+%% Copyright (c) 2012, 2013 Steve Powell (Zteve.Powell@gmail.com)
+%% -----------------------------------------------------------------------------
+
+%% Evaluate an SQL expression for filtering purposes
+
+%% -----------------------------------------------------------------------------
+
+-module(sjx_evaluator).
+
+-export([evaluate/2]).
+%% Evaluation function
+%%
+%%   Given Headers (a list of keyed typed values), and a
+%%   parsed SQL string, evaluate the truth or falsity of the expression.
+%%
+%%   If an identifier is absent from Headers, or the types do not match the comparisons, the
+%%   expression will evaluate to false.
+
+-type itemname() :: binary().
+-type itemtype() ::
+      'longstr' | 'signedint' | 'byte' | 'double' | 'float' | 'long' | 'short' | 'bool'.
+-type itemvalue() :: any().
+
+-type tableitem() :: { itemname(), itemtype(), itemvalue() }.
+-type table() :: list(tableitem()).
+
+-type expression() :: any().
+
+-spec evaluate(expression(), table()) -> true | false | error.
+
+
+evaluate( true,                           _Headers ) -> true;
+evaluate( false,                          _Headers ) -> false;
+
+evaluate( {'not', Exp },                   Headers ) -> not3(evaluate(Exp, Headers));
+evaluate( {'ident', Ident },               Headers ) -> lookup_value(Headers, Ident);
+evaluate( {'is_null', Exp },               Headers ) -> val_of(Exp, Headers) =:= undefined;
+evaluate( {'not_null', Exp },              Headers ) -> val_of(Exp, Headers) =/= undefined;
+evaluate( { Op, Exp },                     Headers ) -> do_una_op(Op, evaluate(Exp, Headers));
+
+evaluate( {'and', Exp1, Exp2 },            Headers ) -> and3(evaluate(Exp1, Headers), evaluate(Exp2, Headers));
+evaluate( {'or', Exp1, Exp2 },             Headers ) -> or3(evaluate(Exp1, Headers), evaluate(Exp2, Headers));
+evaluate( {'like', LHS, Patt },            Headers ) -> isLike(val_of(LHS, Headers), Patt);
+evaluate( {'not_like', LHS, Patt },        Headers ) -> not3(isLike(val_of(LHS, Headers), Patt));
+evaluate( { Op, Exp, {range, From, To} },  Headers ) -> evaluate({ Op, Exp, From, To }, Headers);
+evaluate( {'between', Exp, From, To},           Hs ) -> between(evaluate(Exp, Hs), evaluate(From, Hs), evaluate(To, Hs));
+evaluate( {'not_between', Exp, From, To},       Hs ) -> not3(between(evaluate(Exp, Hs), evaluate(From, Hs), evaluate(To, Hs)));
+evaluate( { Op, LHS, RHS },                Headers ) -> do_bin_op(Op, evaluate(LHS, Headers), evaluate(RHS, Headers));
+
+evaluate( Value,                          _Headers ) -> Value.
+
+not3(true ) -> false;
+not3(false) -> true;
+not3(_    ) -> undefined.
+
+and3(true,  true ) -> true;
+and3(false, _    ) -> false;
+and3(_,     false) -> false;
+and3(_,     _    ) -> undefined.
+
+or3(false, false) -> false;
+or3(true,  _    ) -> true;
+or3(_,     true ) -> true;
+or3(_,     _    ) -> undefined.
+
+do_una_op(_, undefined)  -> undefined;
+do_una_op('-', E) -> -E;
+do_una_op('+', E) -> +E;
+do_una_op(_,   _) -> error.
+
+do_bin_op(_, undefined, _)  -> undefined;
+do_bin_op(_, _, undefined ) -> undefined;
+do_bin_op('=' , L, R) -> L == R;
+do_bin_op('<>', L, R) -> L /= R;
+do_bin_op('>' , L, R) -> L > R;
+do_bin_op('<' , L, R) -> L < R;
+do_bin_op('>=', L, R) -> L >= R;
+do_bin_op('<=', L, R) -> L =< R;
+do_bin_op('in', L, R) -> isIn(L, R);
+do_bin_op('not_in', L, R) -> not isIn(L, R);
+do_bin_op('+' , L, R) -> L + R;
+do_bin_op('-' , L, R) -> L - R;
+do_bin_op('*' , L, R) -> L * R;
+do_bin_op('/' , L, R) when R /= 0 -> L / R;
+do_bin_op('/' , L, R) when L > 0 andalso R == 0 -> plus_infinity;
+do_bin_op('/' , L, R) when L < 0 andalso R == 0 -> minus_infinity;
+do_bin_op('/' , L, R) when L == 0 andalso R == 0 -> nan;
+do_bin_op(_,_,_) -> error.
+
+isLike(undefined, _Patt) -> undefined;
+isLike(L, {regex, MP}) -> patt_match(L, MP);
+isLike(L, {Patt, Esc}) -> patt_match(L, pattern_of(Patt, Esc)).
+
+patt_match(L, MP) ->
+  BS = byte_size(L),
+  case re:run(L, MP, [{capture, first}]) of
+    {match, [{0, BS}]} -> true;
+    _                  -> false
+  end.
+
+isIn(_L, []   ) -> false;
+isIn( L, [L|_]) -> true;
+isIn( L, [_|R]) -> isIn(L,R).
+
+val_of({'ident', Ident}, Hs) -> lookup_value(Hs, Ident);
+val_of(Value,           _Hs) -> Value.
+
+between(E, F, T) when E =:= undefined orelse F =:= undefined orelse T =:= undefined -> undefined;
+between(Value, Lo, Hi) -> Lo =< Value andalso Value =< Hi.
+
+lookup_value(Table, Key) ->
+  case lists:keyfind(Key, 1, Table) of
+    {_, longstr,   Value} -> Value;
+    {_, signedint, Value} -> Value;
+    {_, float,     Value} -> Value;
+    {_, double,    Value} -> Value;
+    {_, byte,      Value} -> Value;
+    {_, short,     Value} -> Value;
+    {_, long,      Value} -> Value;
+    {_, bool,      Value} -> Value;
+    false                 -> undefined
+  end.
+
+pattern_of(S, Esc) -> compile_re(gen_re(binary_to_list(S), Esc)).
+
+gen_re(S, <<Ch>>   ) -> convert(S, [], Ch       );
+gen_re(S, no_escape) -> convert(S, [], no_escape);
+gen_re(_,_) -> error.
+
+convert([],               Acc, _Esc) -> lists:reverse(Acc);
+convert([Esc, Ch | Rest], Acc,  Esc) -> convert(Rest, [escape(Ch) | Acc], Esc);
+convert([$_ | Rest],      Acc,  Esc) -> convert(Rest, [$.         | Acc], Esc);
+convert([$% | Rest],      Acc,  Esc) -> convert(Rest, [".*"       | Acc], Esc);
+convert([Ch | Rest],      Acc,  Esc) -> convert(Rest, [escape(Ch) | Acc], Esc).
+
+escape($.)  -> "\\.";
+escape($*)  -> "\\*";
+escape($+)  -> "\\+";
+escape($?)  -> "\\?";
+escape($^)  -> "\\^";
+escape($=)  -> "\\=";
+escape($!)  -> "\\!";
+escape($:)  -> "\\:";
+escape($$)  -> "\\$";
+escape(${)  -> "\\{";
+escape($})  -> "\\}";
+escape($()  -> "\\(";
+escape($))  -> "\\)";
+escape($|)  -> "\\|";
+escape($[)  -> "\\[";
+escape($])  -> "\\]";
+escape($/)  -> "\\/";
+escape($\\) -> "\\\\";
+escape(Ch)  -> Ch.
+
+compile_re(error) -> error;
+compile_re(MatchMany) ->
+    case re:compile(MatchMany)
+    of  {ok, Rx} -> Rx;
+        _        -> error
+    end.
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_SUITE.erl
new file mode 100644 (file)
index 0000000..341bb97
--- /dev/null
@@ -0,0 +1,149 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2013 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rjms_topic_selector_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include("rabbit_jms_topic_exchange.hrl").
+
+-import(rabbit_ct_client_helpers, [open_connection_and_channel/1,
+                                   close_connection_and_channel/2]).
+
+%% Useful test constructors
+-define(BSELECTARG(BinStr), {?RJMS_COMPILED_SELECTOR_ARG, longstr, BinStr}).
+-define(BASICMSG(Payload, Hdrs), #'amqp_msg'{props=#'P_basic'{headers=Hdrs}, payload=Payload}).
+-define(VERSION_ARG, {?RJMS_VERSION_ARG, longstr, <<"1.4.7">>}).
+
+all() ->
+    [
+      {group, parallel_tests}
+    ].
+
+groups() ->
+    [
+      {parallel_tests, [parallel], [
+                                    test_topic_selection,
+                                    test_default_topic_selection
+                                   ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+test_topic_selection(Config) ->
+    {Connection, Channel} = open_connection_and_channel(Config),
+
+    Exchange = declare_rjms_exchange(Channel, "rjms_test_topic_selector_exchange", [?VERSION_ARG]),
+
+    %% Declare a queue and bind it
+    Q = declare_queue(Channel),
+    bind_queue(Channel, Q, Exchange, <<"select-key">>, [?BSELECTARG(<<"{ident, <<\"boolVal\">>}.">>), ?VERSION_ARG]),
+
+    publish_two_messages(Channel, Exchange, <<"select-key">>),
+
+    get_and_check(Channel, Q, 0, <<"true">>),
+
+    close_connection_and_channel(Connection, Channel),
+    ok.
+
+test_default_topic_selection(Config) ->
+    {Connection, Channel} = open_connection_and_channel(Config),
+
+    Exchange = declare_rjms_exchange(Channel, "rjms_test_default_selector_exchange", [?VERSION_ARG]),
+
+    %% Declare a queue and bind it
+    Q = declare_queue(Channel),
+    bind_queue(Channel, Q, Exchange, <<"select-key">>, [?BSELECTARG(<<"{ident, <<\"boolVal\">>}.">>), ?VERSION_ARG]),
+    publish_two_messages(Channel, Exchange, <<"select-key">>),
+    get_and_check(Channel, Q, 0, <<"true">>),
+
+    close_connection_and_channel(Connection, Channel),
+    ok.
+
+%% Declare a rjms_topic_selector exchange, with args
+declare_rjms_exchange(Ch, XNameStr, XArgs) ->
+    Exchange = list_to_binary(XNameStr),
+    Decl = #'exchange.declare'{ exchange = Exchange
+                              , type = <<"x-jms-topic">>
+                              , arguments = XArgs },
+    #'exchange.declare_ok'{} = amqp_channel:call(Ch, Decl),
+    Exchange.
+
+%% Bind a selector queue to an exchange
+bind_queue(Ch, Q, Ex, RKey, Args) ->
+    Binding = #'queue.bind'{ queue       = Q
+                           , exchange    = Ex
+                           , routing_key = RKey
+                           , arguments   = Args
+                           },
+    #'queue.bind_ok'{} = amqp_channel:call(Ch, Binding),
+    ok.
+
+%% Declare a queue, return Q name (as binary)
+declare_queue(Ch) ->
+    #'queue.declare_ok'{queue = Q} = amqp_channel:call(Ch, #'queue.declare'{}),
+    Q.
+
+%% Get message from Q and check remaining and payload.
+get_and_check(Channel, Queue, ExpectedRemaining, ExpectedPayload) ->
+    Get = #'basic.get'{queue = Queue},
+    {#'basic.get_ok'{delivery_tag = Tag, message_count = Remaining}, Content}
+      = amqp_channel:call(Channel, Get),
+    amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag}),
+
+    ExpectedRemaining = Remaining,
+    ExpectedPayload = Content#amqp_msg.payload,
+    ok.
+
+publish_two_messages(Chan, Exch, RoutingKey) ->
+    Publish = #'basic.publish'{exchange = Exch, routing_key = RoutingKey},
+    amqp_channel:cast(Chan, Publish, ?BASICMSG(<<"false">>, [{<<"boolVal">>, 'bool', false}])),
+    amqp_channel:cast(Chan, Publish, ?BASICMSG(<<"true">>, [{<<"boolVal">>, 'bool', true}])),
+    ok.
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/test/rjms_topic_selector_unit_SUITE.erl
new file mode 100644 (file)
index 0000000..4bcd0d0
--- /dev/null
@@ -0,0 +1,115 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2012, 2013 Pivotal Software, Inc.  All rights reserved.
+%% -----------------------------------------------------------------------------
+
+%% Unit test file for RJMS Topic Selector plugin
+
+%% -----------------------------------------------------------------------------
+
+-module(rjms_topic_selector_unit_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("rabbit_jms_topic_exchange.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-import(rabbit_jms_topic_exchange, [ description/0
+                                   , serialise_events/0
+                                   , route/2
+                                   , validate/1
+                                   , create/2
+                                   , delete/3
+                                   , validate_binding/2
+                                   , add_binding/3
+                                   , remove_bindings/3
+                                   , assert_args_equivalence/2
+                                   , policy_changed/3 ]).
+
+
+all() ->
+    [
+      {group, parallel_tests}
+    ].
+
+groups() ->
+    [
+      {parallel_tests, [parallel], [
+                                    description_test,
+                                    serialise_events_test,
+                                    validate_test,
+                                    create_test,
+                                    delete_test,
+                                    validate_binding_test,
+                                    add_binding_test
+                                   ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    Config.
+
+end_per_suite(Config) ->
+    Config.
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(_Testcase, Config) ->
+    Config.
+
+end_per_testcase(_Testcase, Config) ->
+    Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+description_test(_Config) ->
+  ?assertMatch([{name, _}, {description, _}], description()).
+
+serialise_events_test(_Config) ->
+  ?assertMatch(false, serialise_events()).
+
+validate_test(_Config) ->
+  ?assertEqual(ok, validate(any_exchange)).
+
+create_test(_Config) ->
+  ?assertEqual(ok, create(none, any_exchange)).
+
+delete_test(_Config) ->
+  ?assertEqual(ok, delete(none, any_exchange, any_bindings)).
+
+validate_binding_test(_Config) ->
+  ?assertEqual(ok, validate_binding(any_exchange, any_bindings)).
+
+add_binding_test(_Config) ->
+  ?assertEqual(ok, add_binding(none, dummy_exchange(), dummy_binding())).
+
+dummy_exchange() ->
+  #exchange{name = <<"XName">>, arguments = []}.
+
+dummy_binding() ->
+  #binding{ key = <<"BindingKey">>
+          , destination = #resource{name = <<"DName">>}
+          , args = [{?RJMS_COMPILED_SELECTOR_ARG, longstr, <<"<<\"false\">>.">>}
+                   ,{?RJMS_VERSION_ARG, longstr, <<"1.4.7">>}]}.
diff --git a/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl b/rabbitmq-server/deps/rabbitmq_jms_topic_exchange/test/sjx_evaluation_SUITE.erl
new file mode 100644 (file)
index 0000000..969a5a1
--- /dev/null
@@ -0,0 +1,134 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% Copyright (c) 2012, 2013 Pivotal Software, Inc.  All rights reserved.
+%% -----------------------------------------------------------------------------
+%% Derived from works which were:
+%% Copyright (c) 2012, 2013 Steve Powell (Zteve.Powell@gmail.com)
+%% -----------------------------------------------------------------------------
+
+%% Tests for sjx_evaluator
+
+%% -----------------------------------------------------------------------------
+-module(sjx_evaluation_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-import(sjx_evaluator, [evaluate/2]).
+
+%% Fixed type info for identifiers
+%%
+-define(TEST_TYPE_INFO,
+[ {<<"JMSType">>,          longstr, <<"string">>}
+, {<<"JMSCorrelationID">>, longstr, <<"string">>}
+, {<<"JMSMessageID">>,     longstr, <<"string">>}
+, {<<"JMSDeliveryMode">>,  longstr, <<"string">>}
+, {<<"JMSPriority">>,      longstr, <<"number">>}
+, {<<"JMSTimestamp">>,     longstr, <<"number">>}
+]).
+
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                basic_evaluate_test
+                               ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    Config.
+
+end_per_suite(Config) ->
+    Config.
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(_Testcase, Config) ->
+    Config.
+
+end_per_testcase(_Testcase, Config) ->
+    Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+basic_evaluate_test(_Config) ->
+    Hs = [{<<"JMSType">>, longstr, <<"car">>},
+          {<<"colour">>, longstr, <<"blue">>},
+          {<<"altcol">>, longstr, <<"'blue">>},
+          {<<"likevar">>, longstr, <<"bl_ue">>},
+          {<<"weight">>, signedint, 2501},
+          {<<"WeIgHt">>, signedint, 2},
+          {<<"afloat">>, float, 3.0e-2},
+          {<<"abool">>, bool, false}],
+
+    [ ?_assert(    eval(Hs, {'=', {'ident', <<"JMSType">>}, <<"car">>}                      ))
+    , ?_assert(not eval(Hs, {'ident', <<"abool">>}                                          ))
+    , ?_assert(    eval(Hs, {'not', {'ident', <<"abool">>}}                                 ))
+    , ?_assert(    eval(Hs, {'=', {'ident', <<"colour">>}, <<"blue">>}                      ))
+    , ?_assert(    eval(Hs, {'=', {'ident', <<"weight">>}, 2501}                            ))
+    , ?_assert(    eval(Hs, {'=', {'ident', <<"WeIgHt">>}, 2}                               ))
+    , ?_assert(    eval(Hs, {'=', 2501, {'ident', <<"weight">>}}                            ))
+    , ?_assert(    eval(Hs, {'=', {'ident', <<"afloat">>}, 3.0e-2}                          ))
+    , ?_assert(    eval(Hs, {'>', {'ident', <<"weight">>}, 2500}                            ))
+    , ?_assert(    eval(Hs, {'<', {'ident', <<"weight">>}, 2502}                            ))
+    , ?_assert(    eval(Hs, {'>=', {'ident', <<"weight">>}, 2501}                           ))
+    , ?_assert(    eval(Hs, {'<=', {'ident', <<"weight">>}, 2501}                           ))
+    , ?_assert(not eval(Hs, {'<=', {'ident', <<"weight">>}, 2500}                           ))
+    , ?_assert(    eval(Hs, {'between', {'ident', <<"weight">>}, {'range', 0, 2501}}        ))
+    , ?_assert(    eval(Hs, {'between', {'ident', <<"weight">>}, {'range', 2500, 2501}}     ))
+    , ?_assert(    eval(Hs, {'between', 17, {'range', 17, 18}}                              ))
+    , ?_assert(    eval(Hs, {'between', 17, {'range', 17, 17}}                              ))
+    , ?_assert(    eval(Hs, {'not_between', 16, {'range', 17, 18}}                          ))
+    , ?_assert(    eval(Hs, {'<', 2500, {'ident', <<"weight">>}}                            ))
+    , ?_assert(    eval(Hs, {'>', 2502, {'ident', <<"weight">>}}                            ))
+    , ?_assert(    eval(Hs, {'<=', 2500, {'ident', <<"weight">>}}                           ))
+    , ?_assert(    eval(Hs, {'>=', 2502, {'ident', <<"weight">>}}                           ))
+    , ?_assert(    eval(Hs, {'<=', 2501, {'ident', <<"weight">>}}                           ))
+    , ?_assert(    eval(Hs, {'>=', 2501, {'ident', <<"weight">>}}                           ))
+    , ?_assert(    eval(Hs, {'like', {'ident', <<"colour">>}, {<<"bl%">>, 'no_escape'}}     ))
+    , ?_assert(    eval(Hs, {'like', {'ident', <<"likevar">>}, {<<"b_!_ue">>, <<"!">>}}     ))
+    , ?_assert(    eval(Hs, {'like', {'ident', <<"colour">>}, {<<"bl_e">>, 'no_escape'}}    ))
+    , ?_assert(    eval(Hs, {'not_like', {'ident', <<"colour">>}, {<<"l%">>, 'no_escape'}}  ))
+    , ?_assert(not eval(Hs, {'not_like', {'ident', <<"colour">>}, {<<"bl%">>, 'no_escape'}} ))
+    , ?_assert(    eval(Hs, {'in', {'ident', <<"colour">>}, [<<"blue">>, <<"green">>]}      ))
+    , ?_assert(not eval(Hs, {'not_in', {'ident', <<"colour">>}, [<<"green">>, <<"blue">>]}  ))
+    , ?_assert(not eval(Hs, {'in', {'ident', <<"colour">>}, [<<"bleen">>, <<"grue">>]}      ))
+    , ?_assert(    eval(Hs, {'not_in', {'ident', <<"colour">>}, [<<"grue">>, <<"bleen">>]}  ))
+    , ?_assert(    eval(Hs, {'not_like', {'ident', <<"altcol">>}, {<<"bl%">>, 'no_escape'}} ))
+    , ?_assert(    eval(Hs, {'like', {'ident', <<"altcol">>}, {<<"'bl%">>, 'no_escape'}}    ))
+    , ?_assert(    eval(Hs, {'or', {'and', {'like', {'ident', <<"colour">>}, {<<"bl%">>, 'no_escape'}}
+                                         , {'>', {'ident', <<"weight">>}, 2500}}
+                                 , false}                                                   ))
+    , ?_assert(undefined =:= eval(Hs, {'<=', {'ident', <<"missing">>}, 2500}                ))
+    , ?_assert(undefined =:= eval(Hs, {'in', {'ident', <<"missing">>}, [<<"blue">>]}        ))
+    ].
+
+eval(Hs, S) -> evaluate(S, Hs).
diff --git a/rabbitmq-server/deps/rabbitmq_management/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_management/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index 8c66c1737e0e8626cd4eb1f7dcf2501ce6b4559d..08787f5243b37f6b237da827b99cc5908fcd6bc4 100644 (file)
@@ -1,10 +1,14 @@
 PROJECT = rabbitmq_management
 
+TEST_DEPS += rabbit
+
 DEPS = amqp_client webmachine rabbitmq_web_dispatch rabbitmq_management_agent
 dep_webmachine = git https://github.com/rabbitmq/webmachine.git 6b5210c0ed07159f43222255e05a90bbef6c8cbe
 dep_rabbitmq_web_dispatch = git https://github.com/rabbitmq/rabbitmq-web-dispatch.git stable
 
-DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-dist.mk \
+             rabbit_common/mk/rabbitmq-run.mk \
+             rabbit_common/mk/rabbitmq-tools.mk
 
 # FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
 # reviewed and merged.
@@ -13,6 +17,7 @@ ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
 ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
 include erlang.mk
 
 # --------------------------------------------------------------------
@@ -25,28 +30,3 @@ list-dist-deps::
 prepare-dist::
        $(verbose) sed 's/%%VSN%%/$(VSN)/' bin/rabbitmqadmin \
                > $(EZ_DIR)/priv/www/cli/rabbitmqadmin
-
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
-
-FILTER := all
-COVER := false
-
-WITH_BROKER_TEST_MAKEVARS := \
-       RABBITMQ_CONFIG_FILE=$(CURDIR)/etc/rabbit-test
-WITH_BROKER_TEST_ENVVARS := \
-       RABBITMQADMIN=$(CURDIR)/bin/rabbitmqadmin
-WITH_BROKER_TEST_COMMANDS := \
-       rabbit_test_runner:run_in_broker(\"$(CURDIR)/test\",\"$(FILTER)\")
-WITH_BROKER_TEST_SCRIPTS := $(CURDIR)/test/src/rabbitmqadmin-test-wrapper.sh
-
-TEST_PLUGINS_ROOTDIR = $(TEST_TMPDIR)/PLUGINS
-
-STANDALONE_TEST_COMMANDS := \
-       rabbit_test_runner:run_multi(\"$(DEPS_DIR)\",\"$(CURDIR)/test\",\"$(FILTER)\",$(COVER),\"$(TEST_PLUGINS_ROOTDIR)\")
-
-pre-standalone-tests:: test-tmpdir test-dist
-       $(verbose) rm -rf $(TEST_PLUGINS_ROOTDIR)
-       $(exec_verbose) mkdir -p $(TEST_PLUGINS_ROOTDIR)
-       $(verbose) cp -a $(DIST_DIR) $(TEST_PLUGINS_ROOTDIR)
index a33271ca549b7c9b08dbbde6a88a54c3736d2fac..0b329ad07b16854c1bb00657810ee1875bab0837 100755 (executable)
@@ -17,7 +17,7 @@
 
 import sys
 if sys.version_info[0] < 2 or (sys.version_info[0] == 2 and sys.version_info[1] < 6):
-    print("Sorry, rabbitmqadmin requires at least Python 2.6.")
+    print("Sorry, rabbitmqadmin requires at least Python 2.6 (2.7.9 when HTTPS is enabled).")
     sys.exit(1)
 
 from optparse import OptionParser, TitledHelpFormatter
@@ -26,6 +26,8 @@ import base64
 import json
 import os
 import socket
+import ssl
+import traceback
 
 if sys.version_info[0] == 2:
     from ConfigParser import ConfigParser, NoSectionError
@@ -311,6 +313,10 @@ def make_parser():
         help="PEM format key file for SSL")
     add("--ssl-cert-file", dest="ssl_cert_file",
         help="PEM format certificate file for SSL")
+    add("--ssl-ca-cert-file", dest="ssl_ca_cert_file",
+        help="PEM format CA certificate file for SSL")
+    add("--ssl-disable-hostname-verification", dest="ssl_disable_hostname_verification",
+        help="Disables peer hostname verification", default=False, action="store_true" )
     add("-f", "--format", dest="format",
         help="format for listing commands - one of [" + ", ".join(FORMATS.keys())  + "]")
     add("-S", "--sort", dest="sort", help="sort key for listing queries")
@@ -438,12 +444,35 @@ class Management:
     def delete(self, path):
         return self.http("DELETE", "%s/api%s" % (self.options.path_prefix, path), "")
 
+    def __initialize_https_connection(self, hostname, port):
+        # Python 2.7.9+
+        if hasattr(ssl, 'create_default_context'):
+            return httplib.HTTPSConnection(hostname, port,
+                                           context = self.__initialize_tls_context())
+        # Python < 2.7.8, note: those versions still have SSLv3 enabled
+        #                       and other limitations. See rabbitmq/rabbitmq-management#225
+        else:
+            print("Warning: rabbitmqadmin requires Python 2.7.9+ when HTTPS is used.")
+            return httplib.HTTPSConnection(hostname, port,
+                                           cert_file = self.options.ssl_cert_file,
+                                           key_file  = self.options.ssl_key_file)
+
+    def __initialize_tls_context(self):
+        # Python 2.7.9+ only
+        ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+        ssl_ctx.options &= ~ssl.OP_NO_SSLv3
+        ssl_ctx.verify_mode = ssl.CERT_REQUIRED
+        ssl_ctx.check_hostname = not self.options.ssl_disable_hostname_verification
+        ssl_ctx.load_cert_chain(self.options.ssl_cert_file,
+                                self.options.ssl_key_file)
+        if self.options.ssl_ca_cert_file:
+            ssl_ctx.load_verify_locations(self.options.ssl_ca_cert_file)
+        return ssl_ctx
+
     def http(self, method, path, body):
         if self.options.ssl:
-            conn = httplib.HTTPSConnection(self.options.hostname,
-                                           self.options.port,
-                                           self.options.ssl_key_file,
-                                           self.options.ssl_cert_file)
+            conn = self.__initialize_https_connection(self.options.hostname,
+                                                      self.options.port)
         else:
             conn = httplib.HTTPConnection(self.options.hostname,
                                           self.options.port)
@@ -455,6 +484,7 @@ class Management:
         try:
             conn.request(method, path, body, headers)
         except socket.error as e:
+            traceback.print_exc(e)
             die("Could not connect: {0}".format(e))
         resp = conn.getresponse()
         if resp.status == 400:
index 9f0c0c38494c4beabf27ccddfa996d51d66a91d8..efbcf5cd11a59ef1425ead2dfa4b0514e62b437b 100644 (file)
@@ -16,7 +16,7 @@
 
 ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
 
-ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+ERLANG_MK_VERSION = 2.0.0-pre.2-76-g427cfb8
 
 # Core configuration.
 
@@ -84,7 +84,7 @@ all:: deps app rel
 rel::
        $(verbose) :
 
-check:: clean app tests
+check:: tests
 
 clean:: clean-crashdump
 
@@ -421,6 +421,14 @@ pkg_boss_db_fetch = git
 pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
 pkg_boss_db_commit = master
 
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
 PACKAGES += bson
 pkg_bson_name = bson
 pkg_bson_description = BSON documents in Erlang, see bsonspec.org
@@ -885,14 +893,6 @@ pkg_dh_date_fetch = git
 pkg_dh_date_repo = https://github.com/daleharvey/dh_date
 pkg_dh_date_commit = master
 
-PACKAGES += dhtcrawler
-pkg_dhtcrawler_name = dhtcrawler
-pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
-pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_fetch = git
-pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_commit = master
-
 PACKAGES += dirbusterl
 pkg_dirbusterl_name = dirbusterl
 pkg_dirbusterl_description = DirBuster successor in Erlang
@@ -1139,7 +1139,7 @@ pkg_elvis_description = Erlang Style Reviewer
 pkg_elvis_homepage = https://github.com/inaka/elvis
 pkg_elvis_fetch = git
 pkg_elvis_repo = https://github.com/inaka/elvis
-pkg_elvis_commit = 0.2.4
+pkg_elvis_commit = master
 
 PACKAGES += emagick
 pkg_emagick_name = emagick
@@ -1781,6 +1781,14 @@ pkg_geef_fetch = git
 pkg_geef_repo = https://github.com/carlosmn/geef
 pkg_geef_commit = master
 
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
 PACKAGES += gen_cycle
 pkg_gen_cycle_name = gen_cycle
 pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
@@ -1981,6 +1989,14 @@ pkg_hyper_fetch = git
 pkg_hyper_repo = https://github.com/GameAnalytics/hyper
 pkg_hyper_commit = master
 
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
 PACKAGES += ibrowse
 pkg_ibrowse_name = ibrowse
 pkg_ibrowse_description = Erlang HTTP client
@@ -2501,6 +2517,14 @@ pkg_merl_fetch = git
 pkg_merl_repo = https://github.com/richcarl/merl
 pkg_merl_commit = master
 
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
 PACKAGES += mimetypes
 pkg_mimetypes_name = mimetypes
 pkg_mimetypes_description = Erlang MIME types library
@@ -2733,14 +2757,6 @@ pkg_oauth2_fetch = git
 pkg_oauth2_repo = https://github.com/kivra/oauth2
 pkg_oauth2_commit = master
 
-PACKAGES += oauth2c
-pkg_oauth2c_name = oauth2c
-pkg_oauth2c_description = Erlang OAuth2 Client
-pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
-pkg_oauth2c_fetch = git
-pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
-pkg_oauth2c_commit = master
-
 PACKAGES += octopus
 pkg_octopus_name = octopus
 pkg_octopus_description = Small and flexible pool manager written in Erlang
@@ -3533,6 +3549,14 @@ pkg_stripe_fetch = git
 pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
 pkg_stripe_commit = v1
 
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
 PACKAGES += surrogate
 pkg_surrogate_name = surrogate
 pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
@@ -3907,7 +3931,7 @@ pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
 pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
 pkg_xref_runner_fetch = git
 pkg_xref_runner_repo = https://github.com/inaka/xref_runner
-pkg_xref_runner_commit = 0.2.0
+pkg_xref_runner_commit = 0.2.3
 
 PACKAGES += yamerl
 pkg_yamerl_name = yamerl
@@ -4092,7 +4116,10 @@ endif
 # While Makefile file could be GNUmakefile or makefile,
 # in practice only Makefile is needed so far.
 define dep_autopatch
-       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+               $(call dep_autopatch_erlang_mk,$(1)); \
+       elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
                if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
                        $(call dep_autopatch2,$(1)); \
                elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
@@ -4100,12 +4127,7 @@ define dep_autopatch
                elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
                        $(call dep_autopatch2,$(1)); \
                else \
-                       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
-                               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-                               $(call dep_autopatch_erlang_mk,$(1)); \
-                       else \
-                               $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
-                       fi \
+                       $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
                fi \
        else \
                if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
@@ -4117,8 +4139,11 @@ define dep_autopatch
 endef
 
 define dep_autopatch2
+       if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+       fi; \
        $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-       if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
                $(call dep_autopatch_fetch_rebar); \
                $(call dep_autopatch_rebar,$(1)); \
        else \
@@ -4256,57 +4281,6 @@ define dep_autopatch_rebar.erl
                                Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
                end
        end(),
-       FindFirst = fun(F, Fd) ->
-               case io:parse_erl_form(Fd, undefined) of
-                       {ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
-                               [PT, F(F, Fd)];
-                       {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
-                               case proplists:get_value(parse_transform, CompileOpts) of
-                                       undefined -> [F(F, Fd)];
-                                       PT -> [PT, F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, include, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ ->
-                                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
-                                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                                       _ -> [F(F, Fd)]
-                                               end
-                               end;
-                       {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
-                               {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
-                               [F(F, HrlFd), F(F, Fd)];
-                       {ok, {attribute, _, include_lib, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, import, {Imp, _}}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
-                                       {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {eof, _} ->
-                               file:close(Fd),
-                               [];
-                       _ ->
-                               F(F, Fd)
-               end
-       end,
-       fun() ->
-               ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
-               First0 = lists:usort(lists:flatten([begin
-                       {ok, Fd} = file:open(F, [read]),
-                       FindFirst(FindFirst, Fd)
-               end || F <- ErlFiles])),
-               First = lists:flatten([begin
-                       {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
-                       FindFirst(FindFirst, Fd)
-               end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
-               Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
-                       lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
-       end(),
        Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
        Write("\npreprocess::\n"),
        Write("\npre-deps::\n"),
@@ -4419,9 +4393,10 @@ define dep_autopatch_rebar.erl
                                        Output, ": $$\(foreach ext,.c .C .cc .cpp,",
                                                "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
                                        "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
-                                       case filename:extension(Output) of
-                                               [] -> "\n";
-                                               _ -> " -shared\n"
+                                       case {filename:extension(Output), $(PLATFORM)} of
+                                           {[], _} -> "\n";
+                                           {_, darwin} -> "\n";
+                                           _ -> " -shared\n"
                                        end])
                        end,
                        [PortSpec(S) || S <- PortSpecs]
@@ -4490,6 +4465,15 @@ define dep_autopatch_app.erl
        halt()
 endef
 
+define dep_autopatch_appsrc_script.erl
+       AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+       AppSrcScript = AppSrc ++ ".script",
+       Bindings = erl_eval:new_bindings(),
+       {ok, Conf} = file:script(AppSrcScript, Bindings),
+       ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+       halt()
+endef
+
 define dep_autopatch_appsrc.erl
        AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
        AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
@@ -4576,10 +4560,11 @@ $(DEPS_DIR)/$(call dep_name,$1):
                exit 17; \
        fi
        $(verbose) mkdir -p $(DEPS_DIR)
-       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
-       $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
-               echo " AUTO  " $(DEP_STR); \
-               cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+       $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+                       && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+               echo " AUTO  " $(1); \
+               cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
        fi
        - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
                echo " CONF  " $(DEP_STR); \
@@ -4672,28 +4657,10 @@ dtl_verbose = $(dtl_verbose_$(V))
 
 # Core targets.
 
-define erlydtl_compile.erl
-       [begin
-               Module0 = case "$(strip $(DTL_FULL_PATH))" of
-                       "" ->
-                               filename:basename(F, ".dtl");
-                       _ ->
-                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
-                               re:replace(F2, "/",  "_",  [{return, list}, global])
-               end,
-               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
-               case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
-                       ok -> ok;
-                       {ok, _} -> ok
-               end
-       end || F <- string:tokens("$(1)", " ")],
-       halt().
-endef
-
-ifneq ($(wildcard src/),)
-
 DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
 
+ifneq ($(DTL_FILES),)
+
 ifdef DTL_FULL_PATH
 BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
 else
@@ -4701,7 +4668,7 @@ BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES
 endif
 
 ifneq ($(words $(DTL_FILES)),0)
-# Rebuild everything when the Makefile changes.
+# Rebuild templates when the Makefile changes.
 $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
        @mkdir -p $(ERLANG_MK_TMP)
        @if test -f $@; then \
@@ -4712,9 +4679,28 @@ $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
 ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
 endif
 
-ebin/$(PROJECT).app:: $(DTL_FILES)
+define erlydtl_compile.erl
+       [begin
+               Module0 = case "$(strip $(DTL_FULL_PATH))" of
+                       "" ->
+                               filename:basename(F, ".dtl");
+                       _ ->
+                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+                               re:replace(F2, "/",  "_",  [{return, list}, global])
+               end,
+               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+               case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+                       ok -> ok;
+                       {ok, _} -> ok
+               end
+       end || F <- string:tokens("$(1)", " ")],
+       halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
        $(if $(strip $?),\
-               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?),-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/))
+
 endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
@@ -4888,51 +4874,79 @@ $(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
 # Erlang and Core Erlang files.
 
 define makedep.erl
+       E = ets:new(makedep, [bag]),
+       G = digraph:new([acyclic]),
        ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
-       Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
-       Add = fun (Dep, Acc) ->
-               case lists:keyfind(atom_to_list(Dep), 1, Modules) of
-                       {_, DepFile} -> [DepFile|Acc];
-                       false -> Acc
+       Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+       Add = fun (Mod, Dep) ->
+               case lists:keyfind(Dep, 1, Modules) of
+                       false -> ok;
+                       {_, DepFile} ->
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile}),
+                               digraph:add_vertex(G, Mod),
+                               digraph:add_vertex(G, Dep),
+                               digraph:add_edge(G, Mod, Dep)
                end
        end,
-       AddHd = fun (Dep, Acc) ->
-               case {Dep, lists:keymember(Dep, 2, Modules)} of
-                       {"src/" ++ _, false} -> [Dep|Acc];
-                       {"include/" ++ _, false} -> [Dep|Acc];
-                       _ -> Acc
+       AddHd = fun (F, Mod, DepFile) ->
+               case file:open(DepFile, [read]) of
+                       {error, enoent} -> ok;
+                       {ok, Fd} ->
+                               F(F, Fd, Mod),
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile})
                end
        end,
-       CompileFirst = fun (Deps) ->
-               First0 = [case filename:extension(D) of
-                       ".erl" -> filename:basename(D, ".erl");
-                       _ -> []
-               end || D <- Deps],
-               case lists:usort(First0) of
-                       [] -> [];
-                       [[]] -> [];
-                       First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
-               end
+       Attr = fun
+               (F, Mod, behavior, Dep) -> Add(Mod, Dep);
+               (F, Mod, behaviour, Dep) -> Add(Mod, Dep);
+               (F, Mod, compile, {parse_transform, Dep}) -> Add(Mod, Dep);
+               (F, Mod, compile, Opts) when is_list(Opts) ->
+                       case proplists:get_value(parse_transform, Opts) of
+                               undefined -> ok;
+                               Dep -> Add(Mod, Dep)
+                       end;
+               (F, Mod, include, Hrl) ->
+                       case filelib:is_file("include/" ++ Hrl) of
+                               true -> AddHd(F, Mod, "include/" ++ Hrl);
+                               false ->
+                                       case filelib:is_file("src/" ++ Hrl) of
+                                               true -> AddHd(F, Mod, "src/" ++ Hrl);
+                                               false -> false
+                                       end
+                       end;
+               (F, Mod, include_lib, "$1/include/" ++ Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, include_lib, Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, import, {Imp, _}) ->
+                       case filelib:is_file("src/" ++ atom_to_list(Imp) ++ ".erl") of
+                               false -> ok;
+                               true -> Add(Mod, Imp)
+                       end;
+               (_, _, _, _) -> ok
        end,
-       Depend = [begin
-               case epp:parse_file(F, ["include/"], []) of
-                       {ok, Forms} ->
-                               Deps = lists:usort(lists:foldl(fun
-                                       ({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
-                                       (_, Acc) -> Acc
-                               end, [], Forms)),
-                               case Deps of
-                                       [] -> "";
-                                       _ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
-                               end;
-                       {error, enoent} ->
-                               []
+       MakeDepend = fun(F, Fd, Mod) ->
+               case io:parse_erl_form(Fd, undefined) of
+                       {ok, {attribute, _, Key, Value}, _} ->
+                               Attr(F, Mod, Key, Value),
+                               F(F, Fd, Mod);
+                       {eof, _} ->
+                               file:close(Fd);
+                       _ ->
+                               F(F, Fd, Mod)
                end
+       end,
+       [begin
+               Mod = list_to_atom(filename:basename(F, ".erl")),
+               {ok, Fd} = file:open(F, [read]),
+               MakeDepend(MakeDepend, Fd, Mod)
        end || F <- ErlFiles],
-       ok = file:write_file("$(1)", Depend),
+       Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+       CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+       ok = file:write_file("$(1)", [
+               [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+               "\nCOMPILE_FIRST +=", [[" ", atom_to_list(CF)] || CF <- CompileFirst], "\n"
+       ]),
        halt()
 endef
 
@@ -5069,6 +5083,11 @@ test-dir:
                $(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
 endif
 
+ifeq ($(wildcard src),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps
+       $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+else
 ifeq ($(wildcard ebin/test),)
 test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
 test-build:: clean deps test-deps $(PROJECT).d
@@ -5086,6 +5105,7 @@ clean-test-dir:
 ifneq ($(wildcard $(TEST_DIR)/*.beam),)
        $(gen_verbose) rm -f $(TEST_DIR)/*.beam
 endif
+endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5103,11 +5123,14 @@ $(if $(filter-out -Werror,$1),\
                $(shell echo $1 | cut -b 2-)))
 endef
 
+define compat_erlc_opts_to_list
+       [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
 define compat_rebar_config
 {deps, [$(call comma_list,$(foreach d,$(DEPS),\
        {$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
-{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
-       $(call compat_convert_erlc_opts,$o)))]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
 endef
 
 $(eval _compat_rebar_config = $$(compat_rebar_config))
@@ -5126,12 +5149,12 @@ MAN_SECTIONS ?= 3 7
 
 docs:: asciidoc
 
-asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+asciidoc: asciidoc-guide asciidoc-manual
 
 ifeq ($(wildcard doc/src/guide/book.asciidoc),)
 asciidoc-guide:
 else
-asciidoc-guide:
+asciidoc-guide: distclean-asciidoc doc-deps
        a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
        a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
 endif
@@ -5139,7 +5162,7 @@ endif
 ifeq ($(wildcard doc/src/manual/*.asciidoc),)
 asciidoc-manual:
 else
-asciidoc-manual:
+asciidoc-manual: distclean-asciidoc doc-deps
        for f in doc/src/manual/*.asciidoc ; do \
                a2x -v -f manpage $$f ; \
        done
@@ -5154,7 +5177,7 @@ install-docs:: install-asciidoc
 install-asciidoc: asciidoc-manual
        for s in $(MAN_SECTIONS); do \
                mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
-               install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+               install -g `id -u` -o `id -g` -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
        done
 endif
 
@@ -5214,6 +5237,8 @@ define bs_appsrc_lib
 ]}.
 endef
 
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
 ifdef SP
 define bs_Makefile
 PROJECT = $p
@@ -5223,17 +5248,21 @@ PROJECT_VERSION = 0.0.1
 # Whitespace to be used when creating files from templates.
 SP = $(SP)
 
-include erlang.mk
 endef
 else
 define bs_Makefile
 PROJECT = $p
-include erlang.mk
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 endef
 endif
 
 define bs_apps_Makefile
 PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
 endef
 
@@ -5527,6 +5556,7 @@ endif
        $(eval p := $(PROJECT))
        $(eval n := $(PROJECT)_sup)
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc,src/$(PROJECT).app.src)
@@ -5540,6 +5570,7 @@ ifneq ($(wildcard src/),)
 endif
        $(eval p := $(PROJECT))
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
@@ -5620,12 +5651,32 @@ list-templates:
 
 C_SRC_DIR ?= $(CURDIR)/c_src
 C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
-C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
 C_SRC_TYPE ?= shared
 
 # System type and C compiler/flags.
 
-ifeq ($(PLATFORM),darwin)
+ifeq ($(PLATFORM),msys2)
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+       CC = /mingw64/bin/gcc
+       CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+       CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
        CC ?= cc
        CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
        CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
@@ -5640,10 +5691,15 @@ else ifeq ($(PLATFORM),linux)
        CXXFLAGS ?= -O3 -finline-functions -Wall
 endif
 
-CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
-CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+ifneq ($(PLATFORM),msys2)
+       CFLAGS += -fPIC
+       CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
 
-LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lerl_interface -lei
 
 # Verbosity.
 
@@ -5680,15 +5736,15 @@ OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
 COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
 COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
 
-app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-$(C_SRC_OUTPUT): $(OBJECTS)
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
        $(verbose) mkdir -p priv/
        $(link_verbose) $(CC) $(OBJECTS) \
                $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
-               -o $(C_SRC_OUTPUT)
+               -o $(C_SRC_OUTPUT_FILE)
 
 %.o: %.c
        $(COMPILE_C) $(OUTPUT_OPTION) $<
@@ -5705,13 +5761,13 @@ $(C_SRC_OUTPUT): $(OBJECTS)
 clean:: clean-c_src
 
 clean-c_src:
-       $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+       $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
 
 endif
 
 ifneq ($(wildcard $(C_SRC_DIR)),)
 $(C_SRC_ENV):
-       $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+       $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
                io_lib:format( \
                        \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
                        \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
@@ -5889,7 +5945,7 @@ endif
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
-.PHONY: ct distclean-ct
+.PHONY: ct apps-ct distclean-ct
 
 # Configuration.
 
@@ -5924,17 +5980,33 @@ CT_RUN = ct_run \
        -logdir $(CURDIR)/logs
 
 ifeq ($(CT_SUITES),)
-ct:
+ct: $(if $(IS_APP),,apps-ct)
 else
-ct: test-build
+ct: test-build $(if $(IS_APP),,apps-ct)
        $(verbose) mkdir -p $(CURDIR)/logs/
        $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
 endif
 
+ifneq ($(ALL_APPS_DIRS),)
+apps-ct:
+       $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app ct IS_APP=1; done
+endif
+
+ifndef t
+CT_EXTRA =
+else
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+endif
+
 define ct_suite_target
 ct-$(1): test-build
        $(verbose) mkdir -p $(CURDIR)/logs/
-       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS)
+       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
 endef
 
 $(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
@@ -5953,9 +6025,8 @@ DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
 export DIALYZER_PLT
 
 PLT_APPS ?=
-DIALYZER_DIRS ?= --src -r src
-DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
-       -Wunmatched_returns # -Wunderspecs
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
 
 # Core targets.
 
@@ -5971,6 +6042,18 @@ help::
 
 # Plugin-specific targets.
 
+define filter_opts.erl
+       Opts = binary:split(<<"$1">>, <<"-">>, [global]),
+       Filtered = lists:reverse(lists:foldl(fun
+               (O = <<"pa ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"D ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"I ", _/bits>>, Acc) -> [O|Acc];
+               (_, Acc) -> Acc
+       end, [], Opts)),
+       io:format("~s~n", [[["-", O] || O <- Filtered]]),
+       halt().
+endef
+
 $(DIALYZER_PLT): deps app
        $(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
 
@@ -5984,7 +6067,7 @@ dialyze:
 else
 dialyze: $(DIALYZER_PLT)
 endif
-       $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+       $(verbose) dialyzer --no_native `$(call erlang,$(call filter_opts.erl,$(ERLC_OPTS)))` $(DIALYZER_DIRS) $(DIALYZER_OPTS)
 
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5997,58 +6080,20 @@ EDOC_OPTS ?=
 
 # Core targets.
 
-docs:: distclean-edoc edoc
+ifneq ($(wildcard doc/overview.edoc),)
+docs:: edoc
+endif
 
 distclean:: distclean-edoc
 
 # Plugin-specific targets.
 
-edoc: doc-deps
+edoc: distclean-edoc doc-deps
        $(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().'
 
 distclean-edoc:
        $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info
 
-# Copyright (c) 2015, Erlang Solutions Ltd.
-# This file is part of erlang.mk and subject to the terms of the ISC License.
-
-.PHONY: elvis distclean-elvis
-
-# Configuration.
-
-ELVIS_CONFIG ?= $(CURDIR)/elvis.config
-
-ELVIS ?= $(CURDIR)/elvis
-export ELVIS
-
-ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
-ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
-ELVIS_OPTS ?=
-
-# Core targets.
-
-help::
-       $(verbose) printf "%s\n" "" \
-               "Elvis targets:" \
-               "  elvis       Run Elvis using the local elvis.config or download the default otherwise"
-
-distclean:: distclean-elvis
-
-# Plugin-specific targets.
-
-$(ELVIS):
-       $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
-       $(verbose) chmod +x $(ELVIS)
-
-$(ELVIS_CONFIG):
-       $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
-
-elvis: $(ELVIS) $(ELVIS_CONFIG)
-       $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
-
-distclean-elvis:
-       $(gen_verbose) rm -rf $(ELVIS)
-
 # Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
@@ -6114,6 +6159,74 @@ escript:: distclean-escript deps app
 distclean-escript:
        $(gen_verbose) rm -f $(ESCRIPT_NAME)
 
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "EUnit targets:" \
+               "  eunit       Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+       case "$(COVER)" of
+               "" -> ok;
+               _ ->
+                       case cover:compile_beam_directory("ebin") of
+                               {error, _} -> halt(1);
+                               _ -> ok
+                       end
+       end,
+       case eunit:test($1, [$(EUNIT_OPTS)]) of
+               ok -> ok;
+               error -> halt(2)
+       end,
+       case "$(COVER)" of
+               "" -> ok;
+               _ ->
+                       cover:export("eunit.coverdata")
+       end,
+       halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(call core_find,ebin/,*.beam)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.beam)))
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+       $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP),,apps-eunit)
+       $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit:
+       $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; done
+endif
+endif
+
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
diff --git a/rabbitmq-server/deps/rabbitmq_management/etc/bunny.config b/rabbitmq-server/deps/rabbitmq_management/etc/bunny.config
deleted file mode 100644 (file)
index 4afc9d5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-[{rabbitmq_management, [{listener,[{port, 15674}]}]}].
diff --git a/rabbitmq-server/deps/rabbitmq_management/etc/hare.config b/rabbitmq-server/deps/rabbitmq_management/etc/hare.config
deleted file mode 100644 (file)
index bd8b3d7..0000000
+++ /dev/null
@@ -1 +0,0 @@
-[{rabbitmq_management, [{listener,[{port, 15673}]}]}].
diff --git a/rabbitmq-server/deps/rabbitmq_management/etc/rabbit-test.config b/rabbitmq-server/deps/rabbitmq_management/etc/rabbit-test.config
deleted file mode 100644 (file)
index 6b9bbe2..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-%% We test sample retention separately in rabbit_mgmt_test_db_unit,
-%% but for rabbit_mgmt_test_db we want to make sure samples never
-%% expire.
-[{rabbitmq_management, [{sample_retention_policies,
-                         %% List of {MaxAgeSecs, IfTimestampDivisibleBySecs}
-                         [{global,   [{10000000000000, 1}]},
-                          {basic,    [{10000000000000, 1}]},
-                          {detailed, [{10000000000000, 1}]}]},
-                        %% We're going to test this, so enable it!
-                        {rates_mode, detailed}
-                       ]}
-].
index fc8f58e9ea0a95b25d9f63e74dd3468b09210857..a5f6209e02a92dc70a845d838ab96e4f9ad59ffb 100644 (file)
@@ -18,6 +18,5 @@
                   password = none,
                   impl}). % storage for a context of the resource handler
 -record(range, {first, last, incr}).
--record(stats, {diffs, base}).
 
 -define(AUTH_REALM, "Basic realm=\"RabbitMQ Management\"").
diff --git a/rabbitmq-server/deps/rabbitmq_management/include/rabbit_mgmt_event_collector.hrl b/rabbitmq-server/deps/rabbitmq_management/include/rabbit_mgmt_event_collector.hrl
new file mode 100644 (file)
index 0000000..816365c
--- /dev/null
@@ -0,0 +1,32 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ.
+%%
+%%   The Initial Developer of the Original Code is Pivotal Software, Inc.
+%%   Copyright (c) 2010-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-record(state, {
+          lookups,
+          interval,
+          event_refresh_ref,
+          rates_mode,
+          max_backlog}).
+
+-define(FINE_STATS_TYPES, [channel_queue_stats, channel_exchange_stats,
+                           channel_queue_exchange_stats]).
+
+-define(TABLES, [queue_stats, connection_stats, channel_stats,
+                 consumers_by_queue, consumers_by_channel,
+                 node_stats, node_node_stats,
+                 %% What the previous info item was for any given
+                 %% {queue/channel/connection}
+                 old_stats]).
diff --git a/rabbitmq-server/deps/rabbitmq_management/include/rabbit_mgmt_metrics.hrl b/rabbitmq-server/deps/rabbitmq_management/include/rabbit_mgmt_metrics.hrl
new file mode 100644 (file)
index 0000000..04ec4c1
--- /dev/null
@@ -0,0 +1,211 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ.
+%%
+%%   The Initial Developer of the Original Code is Pivotal Software, Inc.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-define(DELIVER_GET, [deliver, deliver_no_ack, get, get_no_ack]).
+-define(FINE_STATS, [publish, publish_in, publish_out,
+                     ack, deliver_get, confirm, return_unroutable, redeliver] ++
+            ?DELIVER_GET).
+
+%% Most come from channels as fine stats, but queues emit these directly.
+-define(QUEUE_MSG_RATES, [disk_reads, disk_writes]).
+
+-define(MSG_RATES, ?FINE_STATS ++ ?QUEUE_MSG_RATES).
+
+-define(MSG_RATES_DETAILS, [publish_details, publish_in_details,
+                            publish_out_details, ack_details,
+                            deliver_get_details, confirm_details,
+                            return_unroutable_details, redeliver_details,
+                            deliver_details, deliver_no_ack_details,
+                            get_details, get_no_ack_details,
+                            disk_reads_details, disk_writes_details] ++ ?MSG_RATES).
+
+-define(QUEUE_MSG_COUNTS, [messages, messages_ready, messages_unacknowledged]).
+
+-define(COARSE_NODE_STATS,
+        [mem_used, fd_used, sockets_used, proc_used, disk_free,
+         io_read_count,  io_read_bytes,  io_read_time,
+         io_write_count, io_write_bytes, io_write_time,
+         io_sync_count,  io_sync_time,
+         io_seek_count,  io_seek_time,
+         io_reopen_count, mnesia_ram_tx_count,  mnesia_disk_tx_count,
+         msg_store_read_count, msg_store_write_count,
+         queue_index_journal_write_count,
+         queue_index_write_count, queue_index_read_count,
+         gc_num, gc_bytes_reclaimed, context_switches,
+         io_file_handle_open_attempt_count, io_file_handle_open_attempt_time]).
+
+-define(COARSE_NODE_NODE_STATS, [send_bytes, recv_bytes]).
+
+%% Normally 0 and no history means "has never happened, don't
+%% report". But for these things we do want to report even at 0 with
+%% no history.
+-define(ALWAYS_REPORT_STATS,
+        [io_read_time, io_write_time,
+         io_sync_time, sockets_used | ?QUEUE_MSG_COUNTS]).
+
+-define(COARSE_CONN_STATS, [recv_oct, send_oct]).
+
+-define(PROCESS_STATS, [reductions]).
+
+-type(event_type() :: queue_stats | queue_exchange_stats | vhost_stats
+                    | channel_queue_stats | channel_stats
+                    | channel_exchange_stats | exchange_stats
+                    | node_stats | node_node_stats | connection_stats).
+-type(type() :: deliver_get | fine_stats | queue_msg_rates | queue_msg_counts
+              | coarse_node_stats | coarse_node_node_stats | coarse_conn_stats
+              | process_stats).
+
+-type(table_name() :: atom()).
+
+%% TODO remove unused tables
+%% Not all events generate all metrics, so some of the tables may be deleted
+-define(AGGR_TABLES, [aggr_queue_stats_fine_stats,
+                      aggr_queue_stats_deliver_get,
+                      aggr_queue_stats_queue_msg_counts,
+                      aggr_queue_stats_queue_msg_rates,
+                      aggr_queue_stats_process_stats,
+                      aggr_queue_exchange_stats_fine_stats,
+                      aggr_vhost_stats_deliver_get,
+                      aggr_vhost_stats_fine_stats,
+                      aggr_vhost_stats_queue_msg_rates,
+                      aggr_vhost_stats_queue_msg_counts,
+                      aggr_vhost_stats_coarse_conn_stats,
+                      aggr_channel_queue_stats_deliver_get,
+                      aggr_channel_queue_stats_fine_stats,
+                      aggr_channel_queue_stats_queue_msg_counts,
+                      aggr_channel_stats_deliver_get,
+                      aggr_channel_stats_fine_stats,
+                      aggr_channel_stats_queue_msg_counts,
+                      aggr_channel_stats_process_stats,
+                      aggr_channel_exchange_stats_deliver_get,
+                      aggr_channel_exchange_stats_fine_stats,
+                      aggr_exchange_stats_fine_stats,
+                      aggr_node_stats_coarse_node_stats,
+                      aggr_node_node_stats_coarse_node_node_stats,
+                      aggr_connection_stats_coarse_conn_stats,
+                      aggr_connection_stats_process_stats
+                     ]).
+
+-define(INDEX_TABLES, [aggr_queue_stats_fine_stats_index,
+                       aggr_queue_stats_deliver_get_index,
+                       aggr_queue_stats_queue_msg_counts_index,
+                       aggr_queue_stats_queue_msg_rates_index,
+                       aggr_queue_stats_process_stats_index,
+                       aggr_queue_exchange_stats_fine_stats_index,
+                       aggr_vhost_stats_deliver_get_index,
+                       aggr_vhost_stats_fine_stats_index,
+                       aggr_vhost_stats_queue_msg_rates_index,
+                       aggr_vhost_stats_queue_msg_counts_index,
+                       aggr_vhost_stats_coarse_conn_stats_index,
+                       aggr_channel_queue_stats_deliver_get_index,
+                       aggr_channel_queue_stats_fine_stats_index,
+                       aggr_channel_queue_stats_queue_msg_counts_index,
+                       aggr_channel_stats_deliver_get_index,
+                       aggr_channel_stats_fine_stats_index,
+                       aggr_channel_stats_queue_msg_counts_index,
+                       aggr_channel_stats_process_stats_index,
+                       aggr_channel_exchange_stats_deliver_get_index,
+                       aggr_channel_exchange_stats_fine_stats_index,
+                       aggr_exchange_stats_fine_stats_index,
+                       aggr_node_stats_coarse_node_stats_index,
+                       aggr_node_node_stats_coarse_node_node_stats_index,
+                       aggr_connection_stats_coarse_conn_stats_index,
+                       aggr_connection_stats_process_stats_index
+                      ]).
+
+-define(KEY_INDEX_TABLES,
+        [aggr_queue_stats_fine_stats_key_index,
+         aggr_queue_stats_deliver_get_key_index,
+         aggr_queue_stats_queue_msg_counts_key_index,
+         aggr_queue_stats_queue_msg_rates_key_index,
+         aggr_queue_stats_process_stats_key_index,
+         aggr_queue_exchange_stats_fine_stats_key_index,
+         aggr_vhost_stats_deliver_get_key_index,
+         aggr_vhost_stats_fine_stats_key_index,
+         aggr_vhost_stats_queue_msg_rates_key_index,
+         aggr_vhost_stats_queue_msg_counts_key_index,
+         aggr_vhost_stats_coarse_conn_stats_key_index,
+         aggr_channel_queue_stats_deliver_get_key_index,
+         aggr_channel_queue_stats_fine_stats_key_index,
+         aggr_channel_queue_stats_queue_msg_counts_key_index,
+         aggr_channel_stats_deliver_get_key_index,
+         aggr_channel_stats_fine_stats_key_index,
+         aggr_channel_stats_queue_msg_counts_key_index,
+         aggr_channel_stats_process_stats_key_index,
+         aggr_channel_exchange_stats_deliver_get_key_index,
+         aggr_channel_exchange_stats_fine_stats_key_index,
+         aggr_exchange_stats_fine_stats_key_index,
+         aggr_node_stats_coarse_node_stats_key_index,
+         aggr_node_node_stats_coarse_node_node_stats_key_index,
+         aggr_connection_stats_coarse_conn_stats_key_index,
+         aggr_connection_stats_process_stats_key_index
+        ]).
+
+-define(PROC_STATS_TABLES,
+        [channel_stats, connection_stats]).
+
+%% Records are only used to retrieve the field position and to facilitate
+%% keeping track of the data
+-record(deliver_get, {deliver,
+                      deliver_no_ack,
+                      get,
+                      get_no_ack}).
+-record(fine_stats, {publish,
+                     publish_in,
+                     publish_out,
+                     ack,
+                     deliver_get,
+                     confirm,
+                     return_unroutable,
+                     redeliver}).
+-record(queue_msg_rates, {disk_reads,
+                          disk_writes}).
+-record(queue_msg_counts, {messages,
+                           messages_ready,
+                           messages_unacknowledged}).
+-record(coarse_node_stats, {mem_used,
+                            fd_used,
+                            sockets_used,
+                            proc_used,
+                            disk_free,
+                            io_read_count,
+                            io_read_bytes,
+                            io_read_time,
+                            io_write_count,
+                            io_write_bytes,
+                            io_write_time,
+                            io_sync_count,
+                            io_sync_time,
+                            io_seek_count,
+                            io_seek_time,
+                            io_reopen_count,
+                            mnesia_ram_tx_count,
+                            mnesia_disk_tx_count,
+                            msg_store_read_count,
+                            msg_store_write_count,
+                            queue_index_journal_write_count,
+                            queue_index_write_count,
+                            queue_index_read_count,
+                            gc_num,
+                            gc_bytes_reclaimed,
+                            context_switches,
+                            io_file_handle_open_attempt_count,
+                            io_file_handle_open_attempt_time}).
+-record(coarse_node_node_stats, {send_bytes,
+                                 recv_bytes}).
+-record(coarse_conn_stats, {recv_oct,
+                            send_oct}).
+-record(process_stats, {reductions}).
index 3d73c7839d4bf01ddf318321f6f1be0a554f1619..dca669d75bd932f828ac3e0e66877904d710963a 100644 (file)
@@ -7,6 +7,5 @@
 -define(BAD_REQUEST, 400).
 -define(NOT_AUTHORISED, 401).
 %%-define(NOT_FOUND, 404). Defined for AMQP by amqp_client.hrl (as 404)
--define(PREFIX, "http://localhost:15672/api").
 %% httpc seems to get racy when using HTTP 1.1
 -define(HTTPC_OPTS, [{version, "HTTP/1.0"}]).
index ac43f8ebe1b337c17ad26186ffd68a062cbee97f..1cb9ed5a46d13d140af255b20ce4fceff0500920 100644 (file)
@@ -778,6 +778,36 @@ or:
           repeatedly pinged).
         </td>
       </tr>
+      <tr>
+        <td>X</td>
+        <td></td>
+        <td></td>
+        <td></td>
+        <td class="path">/api/healthchecks/node</td>
+        <td>
+          Runs basic healthchecks in the current node. Checks that the rabbit
+          application is running, channels and queues can be listed successfully, and
+          that no alarms are in effect. If everything is working correctly, will
+          return HTTP status 200 with body: <pre>{"status":"ok"}</pre> If
+          something fails, will return HTTP status 200 with the body of
+          <pre>{"status":"failed","reason":"string"}</pre>
+        </td>
+      </tr>
+      <tr>
+        <td>X</td>
+        <td></td>
+        <td></td>
+        <td></td>
+        <td class="path">/api/healthchecks/node/<i>node</i></td>
+        <td>
+          Runs basic healthchecks in the given node. Checks that the rabbit
+          application is running, list_channels and list_queues return, and
+          that no alarms are raised. If everything is working correctly, will
+          return HTTP status 200 with body: <pre>{"status":"ok"}</pre> If
+          something fails, will return HTTP status 200 with the body of
+          <pre>{"status":"failed","reason":"string"}</pre>
+        </td>
+      </tr>
     </table>
   </body>
 </html>
index 14759ffeecc540000a014e721c7f5999d6bc7857..7ca790e53473916da033811f308aa9a0c87c6e07 100644 (file)
@@ -1,4 +1,5 @@
 <!doctype html>
+<meta http-equiv="X-UA-Compatible" content="IE=edge" />
 <html>
   <head>
     <title>RabbitMQ Management</title>
index 0ec370fb3219881ab2cb2774c9144e441019136e..cccbc4ca11fab80ed2af27e1e645f6b94aaafa1b 100644 (file)
@@ -29,6 +29,11 @@ function data_rates(id, stats) {
     return rates_chart_or_text(id, stats, items, fmt_rate_bytes, fmt_rate_bytes_axis, true, 'Data rates');
 }
 
+function data_reductions(id, stats) {
+    var items = [['Reductions', 'reductions']];
+    return rates_chart_or_text(id, stats, items, fmt_rate, fmt_rate_axis, true, 'Reductions (per second)', 'process-reductions');
+}
+
 function rates_chart_or_text(id, stats, items, fmt, axis_fmt, chart_rates,
                              heading, heading_help) {
     var prefix = chart_h3(id, heading, heading_help);
@@ -243,7 +248,8 @@ function render_charts() {
     });
 }
 
-var chart_colors = {full: ['#edc240', '#afd8f8', '#cb4b4b', '#4da74d', '#9440ed', '#666666', '#aaaaaa'],
+var chart_colors = {full: ['#edc240', '#afd8f8', '#cb4b4b', '#4da74d', '#9440ed', '#666666', '#aaaaaa', 
+                           '#7c79c3', '#8e6767', '#67808e', '#e5e4ae', '#4b4a55', '#bba0c1'],
                     node: ['#6ae26a', '#e24545']};
 
 var chart_chrome = {
index 8d4a0ff17a5b60ee097ebfab5cdb246573f8dca0..8cec43d01d0f8a452ae33f9fd0c040f281146cfe 100644 (file)
@@ -61,7 +61,7 @@ dispatcher_add(function(sammy) {
 
     sammy.get('#/channels/:name', function() {
             render({'channel': {path:   '/channels/' + esc(this.params['name']),
-                                options:{ranges:['msg-rates-ch']}}},
+                                options:{ranges:['data-rates-ch','msg-rates-ch']}}},
                    'channel', '#/channels');
         });
 
@@ -102,7 +102,7 @@ dispatcher_add(function(sammy) {
     sammy.get('#/queues/:vhost/:name', function() {
             var path = '/queues/' + esc(this.params['vhost']) + '/' + esc(this.params['name']);
             render({'queue': {path:    path,
-                              options: {ranges:['lengths-q', 'msg-rates-q']}},
+                              options: {ranges:['lengths-q', 'msg-rates-q', 'data-rates-q']}},
                     'bindings': path + '/bindings'}, 'queue', '#/queues');
         });
     sammy.put('#/queues', function() {
index 694926aaa7892e0798d4f5766d3627f63e76953d..29bce6a2a0a7a2f51fd0668448778c7efef9ac15 100644 (file)
@@ -41,8 +41,8 @@ var NAVIGATION = {'Overview':    ['#/',            "management"],
                   'Admin':
                     [{'Users':         ['#/users',    "administrator"],
                       'Virtual Hosts': ['#/vhosts',   "administrator"],
-                      'Policies':      ['#/policies', "policymaker"]},
-                     "policymaker"]
+                      'Policies':      ['#/policies', "management"]},
+                     "management"]
                  };
 
 var CHART_PERIODS = {'60|5':       'Last minute',
@@ -126,7 +126,7 @@ var COLUMNS =
                      ['memory',             'Memory',             true],
                      ['disk_space',         'Disk space',         true]],
       'General': [['uptime',     'Uptime',     false],
-                  ['rates_mode', 'Rates mode', false],
+                  ['rates_mode', 'Rates mode', true],
                   ['info',       'Info',       true]]}};
 
 ///////////////////////////////////////////////////////////////////////////
@@ -138,7 +138,7 @@ var COLUMNS =
 // All these are to do with hiding UI elements if
 var rates_mode;                  // ...there are no fine stats
 var user_administrator;          // ...user is not an admin
-var user_policymaker;            // ...user is not a policymaker
+var is_user_policymaker;         // ...user is not a policymaker
 var user_monitor;                // ...user cannot monitor
 var nodes_interesting;           // ...we are not in a cluster
 var vhosts_interesting;          // ...there is only one vhost
@@ -166,7 +166,7 @@ function setup_global_vars() {
     rates_mode = overview.rates_mode;
     user_tags = expand_user_tags(user.tags.split(","));
     user_administrator = jQuery.inArray("administrator", user_tags) != -1;
-    user_policymaker = jQuery.inArray("policymaker", user_tags) != -1;
+    is_user_policymaker = jQuery.inArray("policymaker", user_tags) != -1;
     user_monitor = jQuery.inArray("monitoring", user_tags) != -1;
     replace_content('login-details',
                     '<p>User: <b>' + fmt_escape_html(user.name) + '</b></p>' +
index 37f92f6f6c341b28706ac0caba2c715f85df11a8..603a48d34463cbab186e111ebb5d7f9526aff1f2 100644 (file)
@@ -328,6 +328,18 @@ HELP = {
         <dd>Rate at which queue index segment files are written. </dd>\
       </dl>',
 
+    'gc-operations':
+    'Rate at which garbage collection operations take place on this node.',
+
+    'gc-bytes':
+    'Rate at which memory is reclaimed by the garbage collector on this node.',
+
+    'context-switches-operations':
+    'Rate at which runtime context switching takes place on this node.',
+
+    'process-reductions':
+    'Rate at which reductions take place on this process.',
+
     'foo': 'foo' // No comma.
 };
 
index bb2e70f5a8267d6847387c91ed388f8e94992c51..2d4c13dfe68619fe4630af7306dca7c4df6366e9 100644 (file)
@@ -139,10 +139,13 @@ function update_vhosts() {
 
 function setup_extensions() {
     var extensions = JSON.parse(sync_get('/extensions'));
-    extension_count = extensions.length;
+    extension_count = 0;
     for (var i in extensions) {
         var extension = extensions[i];
-        dynamic_load(extension.javascript);
+        if ($.isPlainObject(extension) && extension.hasOwnProperty("javascript")) {
+            dynamic_load(extension.javascript);
+            extension_count++;
+        }
     }
 }
 
@@ -433,6 +436,16 @@ function apply_state(reqs) {
                 qs.push(prefix + '_incr=' + parseInt(range[1]));
             }
         }
+        /* Unknown options are used as query parameters as is. */
+        Object.keys(options).forEach(function (key) {
+          /* Skip known keys we already handled and undefined parameters. */
+          if (key == 'vhost' || key == 'sort' || key == 'ranges')
+            return;
+          if (!key || options[key] == undefined)
+            return;
+
+          qs.push(esc(key) + '=' + esc(options[key]));
+        });
         qs = qs.join('&');
         if (qs != '')
             if (req2.indexOf("?page=") >- 1)
index 1e9d18e7f98a9878669d67f939b34e7df45ffe9a..b5e3c8cdb9be1cd9a5b692ff51e9ca92ec751bf1 100644 (file)
 </div>
 </div>
 <% } %>
+
+<% if(channel.reductions || channel.garbage_collection) { %>
+<div class="section-hidden">
+<h2>Runtime Metrics (Advanced)</h2>
+ <div class="hider updatable">
+ <%= data_reductions('reductions-rates-conn', channel) %>
+ <table class="facts">
+    <% if (channel.garbage_collection.min_bin_vheap_size) { %>
+        <tr>
+        <th>Minimum binary virtual heap size in words (min_bin_vheap_size)</th>
+        <td><%= channel.garbage_collection.min_bin_vheap_size %></td>
+        </tr>
+    <% } %>
+
+    <% if (channel.garbage_collection.min_heap_size) { %>
+        <tr>
+        <th>Minimum heap size in words (min_heap_size)</th>
+        <td><%= channel.garbage_collection.min_heap_size %></td>
+        </tr>
+    <% } %>
+
+    <% if (channel.garbage_collection.fullsweep_after) { %>
+        <tr>
+        <th>Maximum generational collections before fullsweep (fullsweep_after)</th>
+        <td><%= channel.garbage_collection.fullsweep_after %></td>
+        </tr>
+    <% } %>
+
+    <% if (channel.garbage_collection.minor_gcs) { %>
+        <tr>
+        <th>Number of minor GCs (minor_gcs)</th>
+        <td><%= channel.garbage_collection.minor_gcs %></td>
+        </tr>
+    <% } %>
+ </table>
+ </div>
+</div>
+
+<% } %>
+
index ae1ce9b08011d7a822706f650bb8f37341fb2651..eb79fb9fdbd66f6a51096447162f86c9d032886d 100644 (file)
@@ -1,4 +1,4 @@
-<h1>Connection <b><%= fmt_string(connection.name) %></b><%= fmt_maybe_vhost(connection.vhost) %></h1>
+<h1>Connection <%= fmt_string(connection.name) %> <%= fmt_maybe_vhost(connection.vhost) %></h1>
 
 <div class="section">
 <h2>Overview</h2>
   <td><%= fmt_node(connection.node) %></td>
 </tr>
 <% } %>
+
+<% if (connection.client_properties.connection_name) { %>
+<tr>
+  <th>Client-provided name</th>
+  <td><%= fmt_string(connection.client_properties.connection_name) %></td>
+</tr>
+<% } %>
+
 <tr>
  <th>Username</th>
  <td><%= fmt_string(connection.user) %></td>
@@ -60,6 +68,7 @@
  <td><%= connection.channel_max %> channels</td>
 </tr>
 </table>
+
 <% } %>
 
 </div>
 </div>
 <% } %>
 
+<% if(connection.reductions || connection.garbage_collection) { %>
+<div class="section-hidden">
+<h2>Runtime Metrics (Advanced)</h2>
+ <div class="hider updatable">
+ <%= data_reductions('reductions-rates-conn', connection) %>
+ <table class="facts">
+    <% if (connection.garbage_collection.min_bin_vheap_size) { %>
+        <tr>
+        <th>Minimum binary virtual heap size in words (min_bin_vheap_size)</th>
+        <td><%= connection.garbage_collection.min_bin_vheap_size %></td>
+        </tr>
+    <% } %>
+
+    <% if (connection.garbage_collection.min_heap_size) { %>
+        <tr>
+        <th>Minimum heap size in words (min_heap_size)</th>
+        <td><%= connection.garbage_collection.min_heap_size %></td>
+        </tr>
+    <% } %>
+
+    <% if (connection.garbage_collection.fullsweep_after) { %>
+        <tr>
+        <th>Maximum generational collections before fullsweep (fullsweep_after)</th>
+        <td><%= connection.garbage_collection.fullsweep_after %></td>
+        </tr>
+    <% } %>
+
+    <% if (connection.garbage_collection.minor_gcs) { %>
+        <tr>
+        <th>Number of minor GCs (minor_gcs)</th>
+        <td><%= connection.garbage_collection.minor_gcs %></td>
+        </tr>
+    <% } %>
+ </table>
+ </div>
+</div>
+
+<% } %>
+
+
 <div class="section-hidden">
   <h2>Close this connection</h2>
   <div class="hider">
index f9f96df9d6d0b4ceedd9a0f4d63aab10a6514e38..7b71051623ab8abd64462975a3d433bf337877af 100644 (file)
@@ -16,7 +16,7 @@
 <% if (vhosts_interesting) { %>
     <th><%= fmt_sort('Virtual host', 'vhost') %></th>
 <% } %>
-    <th><%= fmt_sort('Name',           'name') %></th>
+    <th><%= fmt_sort('Name',           'client_properties.connection_name;name') %></th>
 <% if (nodes_interesting) { %>
     <th><%= fmt_sort('Node',           'node') %></th>
 <% } %>
 <% if (vhosts_interesting) { %>
     <td><%= fmt_string(connection.vhost) %></td>
 <% } %>
+<% if(connection.client_properties.connection_name) { %>
+    <td><%= link_conn(connection.name) %>
+        <%= fmt_string(short_conn(connection.client_properties.connection_name)) %>
+    </td>
+<% } else { %>
     <td><%= link_conn(connection.name) %></td>
+<% } %>
 <% if (nodes_interesting) { %>
     <td><%= fmt_node(connection.node) %></td>
 <% } %>
index adde0cfca32f0e0778a4e7eb07bf63e4a5a3979f..8cd71bb1683022367e0f06c8044728a7568e4ee0 100644 (file)
        ['Write', 'io_write_count'],
        ['Seek', 'io_seek_count'],
        ['Sync', 'io_sync_count'],
-       ['Reopen', 'io_reopen_count']],
+       ['File handle reopen', 'io_reopen_count'],
+       ['File handle open attempt', 'io_file_handle_open_attempt_count']],
       fmt_rate, fmt_rate_axis, true, 'I/O operations', 'io-operations') %>
 
   <%= rates_chart_or_text('persister-io-stats-bytes', node,
       [['Read', 'io_read_avg_time'],
        ['Write', 'io_write_avg_time'],
        ['Seek', 'io_seek_avg_time'],
-       ['Sync', 'io_sync_avg_time']],
+       ['Sync', 'io_sync_avg_time'],
+       ['File handle open attempt', 'io_file_handle_open_attempt_avg_time']],
       fmt_ms, fmt_ms, false, 'I/O average time per operation') %>
 </div>
 </div>
   </table>
   </div>
 
+  <%= rates_chart_or_text('advanced-gc-stats-count', node,
+      [['GC', 'gc_num']],
+      fmt_rate, fmt_rate_axis, true, 'GC operations', 'gc-operations') %>
+
+  <%= rates_chart_or_text('advanced-gc-bytes-stats-count', node,
+      [['GC bytes reclaimed', 'gc_bytes_reclaimed']],
+      fmt_rate, fmt_rate_axis, true, 'GC bytes reclaimed', 'gc-bytes') %>
+
+  <%= rates_chart_or_text('advanced-context-switches-stats-count', node,
+      [['Context switches', 'context_switches']],
+      fmt_rate, fmt_rate_axis, true, 'Context switch operations', 'context-switches-operations') %>
+
 <h3>All applications</h3>
 <table class="list">
     <tr>
index 9e4e3c2c3f263cd3a16aa2b3c20721e1f34faff4..1816f7a6a7b0451679a4f896c33ae4fc0adbb892 100644 (file)
 <% if (vhosts_interesting) { %>
      <td><%= fmt_string(policy.vhost) %></td>
 <% } %>
+<% if (is_user_policymaker) { %>
      <td><%= link_policy(policy.vhost, policy.name) %></td>
+<% } else { %>
+     <td><%= fmt_string(policy.name) %></td>
+<% } %>
      <td><%= fmt_string(policy.pattern) %></td>
      <td><%= fmt_string(policy['apply-to']) %></td>
      <td><%= fmt_table_short(policy.definition) %></td>
@@ -42,6 +46,7 @@
   </div>
   </div>
 </div>
+<% if (is_user_policymaker) { %>
 
 <div class="section-hidden">
   <h2>Add / update a policy</h2>
     </form>
   </div>
 </div>
+<% } %>
index 46fcd4426c81ca59c371f5945524086c71032fa3..23cad6584dfb2378306fec71cb1b599268152786 100644 (file)
   </div>
 </div>
 
-<% if (user_policymaker) { %>
+<% if (is_user_policymaker) { %>
 <div class="section-hidden">
   <h2>Move messages</h2>
   <div class="hider">
     </form>
   </div>
 </div>
+
+<% if(queue.reductions || queue.garbage_collection) { %>
+<div class="section-hidden">
+<h2>Runtime Metrics (Advanced)</h2>
+ <div class="hider updatable">
+ <%= data_reductions('reductions-rates-queue', queue) %>
+ <table class="facts">
+    <% if (queue.garbage_collection.min_bin_vheap_size) { %>
+        <tr>
+        <th>Minimum binary virtual heap size in words (min_bin_vheap_size)</th>
+        <td><%= queue.garbage_collection.min_bin_vheap_size %></td>
+        </tr>
+    <% } %>
+
+    <% if (queue.garbage_collection.min_heap_size) { %>
+        <tr>
+        <th>Minimum heap size in words (min_heap_size)</th>
+        <td><%= queue.garbage_collection.min_heap_size %></td>
+        </tr>
+    <% } %>
+
+    <% if (queue.garbage_collection.fullsweep_after) { %>
+        <tr>
+        <th>Maximum generational collections before fullsweep (fullsweep_after)</th>
+        <td><%= queue.garbage_collection.fullsweep_after %></td>
+        </tr>
+    <% } %>
+
+    <% if (queue.garbage_collection.minor_gcs) { %>
+        <tr>
+        <th>Number of minor GCs (minor_gcs)</th>
+        <td><%= queue.garbage_collection.minor_gcs %></td>
+        </tr>
+    <% } %>
+ </table>
+ </div>
+</div>
+
+<% } %>
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
diff --git a/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_channel_stats_collector.erl b/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_channel_stats_collector.erl
new file mode 100644 (file)
index 0000000..6bd222f
--- /dev/null
@@ -0,0 +1,125 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ.
+%%
+%%   The Initial Developer of the Original Code is Pivotal Software, Inc.
+%%   Copyright (c) 2010-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_channel_stats_collector).
+
+-include("rabbit_mgmt.hrl").
+-include("rabbit_mgmt_metrics.hrl").
+-include("rabbit_mgmt_event_collector.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+         code_change/3, handle_pre_hibernate/1]).
+
+-export([prioritise_cast/3]).
+
+-import(rabbit_misc, [pget/3]).
+-import(rabbit_mgmt_db, [pget/2, id_name/1, id/2, lookup_element/2]).
+
+prioritise_cast({event, #event{type = channel_stats}}, Len,
+                #state{max_backlog = MaxBacklog} = _State)
+  when Len > MaxBacklog ->
+    drop;
+prioritise_cast(_Msg, _Len, _State) ->
+    0.
+
+%% See the comment on rabbit_mgmt_db for the explanation of
+%% events and stats.
+
+%% Although this gen_server could process all types of events through the
+%% handle_cast, rabbit_mgmt_db_handler (in the management agent) forwards
+%% only the non-prioritiy events channel_stats
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+start_link() ->
+    case gen_server2:start_link({global, ?MODULE}, ?MODULE, [], []) of
+        {ok, Pid} -> register(?MODULE, Pid), %% [1]
+                     {ok, Pid};
+        Else      -> Else
+    end.
+%% [1] For debugging it's helpful to locally register the name too
+%% since that shows up in places global names don't.
+
+%%----------------------------------------------------------------------------
+%% Internal, gen_server2 callbacks
+%%----------------------------------------------------------------------------
+
+init([]) ->
+    {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+    {ok, RatesMode} = application:get_env(rabbitmq_management, rates_mode),
+    {ok, MaxBacklog} = application:get_env(rabbitmq_management,
+                                           stats_event_max_backlog),
+    process_flag(priority, high),
+    rabbit_log:info("Statistics channel stats collector started.~n"),
+    {ok, reset_lookups(
+           #state{interval               = Interval,
+                  rates_mode             = RatesMode,
+                  max_backlog            = MaxBacklog}), hibernate,
+     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% Used in rabbit_mgmt_test_db where we need guarantees events have
+%% been handled before querying
+handle_call({event, Event = #event{reference = none}}, _From, State) ->
+    rabbit_mgmt_event_collector_utils:handle_event(Event, State),
+    reply(ok, State);
+
+handle_call(_Request, _From, State) ->
+    reply(not_understood, State).
+
+%% Only handle events that are real.
+handle_cast({event, Event = #event{reference = none}}, State) ->
+    rabbit_mgmt_event_collector_utils:handle_event(Event, State),
+    noreply(State);
+
+handle_cast({event, Event = #event{reference = Ref}},
+            State = #state{event_refresh_ref = Ref}) ->
+    rabbit_mgmt_event_collector_utils:handle_event(Event, State),
+    noreply(State);
+
+handle_cast(_Request, State) ->
+    noreply(State).
+
+handle_info(_Info, State) ->
+    noreply(State).
+
+terminate(_Arg, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+reply(Reply, NewState) -> {reply, Reply, NewState, hibernate}.
+noreply(NewState) -> {noreply, NewState, hibernate}.
+
+reset_lookups(State) ->
+    State#state{lookups = [{exchange, fun rabbit_exchange:lookup/1},
+                           {queue,    fun rabbit_amqqueue:lookup/1}]}.
+
+handle_pre_hibernate(State) ->
+    %% rabbit_event can end up holding on to some memory after a busy
+    %% workout, but it's not a gen_server so we can't make it
+    %% hibernate. The best we can do is forcibly GC it here (if
+    %% rabbit_mgmt_db is hibernating the odds are rabbit_event is
+    %% quiescing in some way too).
+    rpc:multicall(
+      rabbit_mnesia:cluster_nodes(running), rabbit_mgmt_db_handler, gc, []),
+    {hibernate, State}.
diff --git a/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_cors.erl b/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_cors.erl
new file mode 100644 (file)
index 0000000..5226241
--- /dev/null
@@ -0,0 +1,88 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ Management Plugin.
+%%
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+%% Useful documentation about CORS:
+%% * https://tools.ietf.org/html/rfc6454
+%% * https://www.w3.org/TR/cors/
+%% * https://staticapps.org/articles/cross-domain-requests-with-cors/
+-module(rabbit_mgmt_cors).
+
+-export([set_headers/2]).
+
+%% We don't set access-control-max-age because we currently have
+%% no way to know which headers apply to the whole resource. We
+%% only know for the next request.
+set_headers(ReqData, Module) ->
+    ReqData1 = case wrq:get_resp_header("vary", ReqData) of
+        undefined -> wrq:set_resp_header("vary", "origin", ReqData);
+        VaryValue -> wrq:set_resp_header("vary", VaryValue ++ ", origin", ReqData)
+    end,
+    case match_origin(ReqData1) of
+        false ->
+            ReqData1;
+        Origin ->
+            ReqData2 = case wrq:method(ReqData1) of
+                'OPTIONS' -> handle_options(ReqData1, Module);
+                _         -> ReqData1
+            end,
+            wrq:set_resp_headers([
+                {"access-control-allow-origin",      Origin},
+                {"access-control-allow-credentials", "true"}
+            ], ReqData2)
+    end.
+
+%% Set max-age from configuration (default: 30 minutes).
+%% Set allow-methods from what is defined in Module:allowed_methods/2.
+%% Set allow-headers to the same as the request (accept all headers).
+handle_options(ReqData, Module) ->
+    MaxAge = application:get_env(rabbitmq_management, cors_max_age, 1800),
+    {Methods, _, _} = Module:allowed_methods(undefined, undefined),
+    AllowMethods = string:join([atom_to_list(M) || M <- Methods], ", "),
+    ReqHeaders = wrq:get_req_header("access-control-request-headers", ReqData),
+    MaxAgeHd = case MaxAge of
+        undefined -> [];
+        _ -> {"access-control-max-age", integer_to_list(MaxAge)}
+    end,
+    MaybeAllowHeaders = case ReqHeaders of
+        undefined -> [];
+        _ -> [{"access-control-allow-headers", ReqHeaders}]
+    end,
+    wrq:set_resp_headers([MaxAgeHd,
+        {"access-control-allow-methods", AllowMethods}
+        |MaybeAllowHeaders], ReqData).
+
+%% If the origin header is missing or "null", we disable CORS.
+%% Otherwise, we only enable it if the origin is found in the
+%% cors_allow_origins configuration variable, or if "*" is (it
+%% allows all origins).
+match_origin(ReqData) ->
+    case wrq:get_req_header("origin", ReqData) of
+        undefined -> false;
+        "null" -> false;
+        Origin ->
+            AllowedOrigins = application:get_env(rabbitmq_management,
+                cors_allow_origins, []),
+            case lists:member(Origin, AllowedOrigins) of
+                true ->
+                    Origin;
+                false ->
+                    %% Maybe the configuration explicitly allows "*".
+                    case lists:member("*", AllowedOrigins) of
+                        true  -> Origin;
+                        false -> false
+                    end
+            end
+    end.
index efcf1365f94eb91248f02c0c6934eb874c91a8c6..8692bcac8c0ff9bf19d165c4fa67e9e0f933c5dc 100644 (file)
 -module(rabbit_mgmt_db).
 
 -include("rabbit_mgmt.hrl").
+-include("rabbit_mgmt_metrics.hrl").
 -include_lib("rabbit_common/include/rabbit.hrl").
 
 -behaviour(gen_server2).
 
 -export([start_link/0]).
+-export([pget/2, id_name/1, id/2, lookup_element/2]).
 
 -export([augment_exchanges/3, augment_queues/3,
          augment_nodes/2, augment_vhosts/2,
 
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
          code_change/3, handle_pre_hibernate/1,
-         prioritise_cast/3, prioritise_call/4, format_message_queue/2]).
+         format_message_queue/2]).
 
-%% For testing
--export([override_lookups/1, reset_lookups/0]).
-
--import(rabbit_misc, [pget/3, pset/3]).
+-import(rabbit_misc, [pget/3]).
 
 %% The management database listens to events broadcast via the
 %% rabbit_event mechanism, and responds to queries from the various
 %% have to have originated as fine grained stats, but can still have
 %% been aggregated.
 %%
-%% Created events and basic stats are stored in ETS tables by object,
-%% looked up in an orddict in #state.tables. Simple and detailed stats
-%% (which only differ depending on how they're keyed) are stored in
-%% #state.aggregated_stats.
+%% Created events and basic stats are stored in ETS tables by object.
+%% Simple and detailed stats (which only differ depending on how
+%% they're keyed) are stored in aggregated stats tables
+%% (see rabbit_mgmt_stats.erl and include/rabbit_mgmt_metrics.hrl)
 %%
-%% For detailed stats we also store an index for each object referencing
-%% all the other objects that form a detailed stats key with it. This is
-%% so that we can always avoid table scanning while deleting stats and
-%% thus make sure that handling deleted events is O(n)-ish.
+%% Keys from simple and detailed stats are aggregated in several
+%% records, stored in different ETS tables. We store a base counter
+%% for everything that happened before the samples we have kept,
+%% and a series of records which add the timestamp as part of the key.
 %%
-%% For each key for simple and detailed stats we maintain a #stats{}
-%% record, essentially a base counter for everything that happened
-%% before the samples we have kept, and a gb_tree of {timestamp,
-%% sample} values.
+%% Each ETS aggregated table has a GC process with a timer to periodically
+%% aggregate old samples in the base.
 %%
-%% We also have #state.old_stats to let us calculate instantaneous
+%% We also have old_stats to let us calculate instantaneous
 %% rates, in order to apportion simple / detailed stats into time
 %% slices as they come in. These instantaneous rates are not returned
 %% in response to any query, the rates shown in the API are calculated
 %% it's quite close to being a cache of "the previous stats we
 %% received".
 %%
-%% We also keep a timer going, in order to prune old samples from
-%% #state.aggregated_stats.
-%%
 %% Overall the object is to do all the aggregation when events come
 %% in, and make queries be simple lookups as much as possible. One
 %% area where this does not happen is the global overview - which is
 %% aggregated from vhost stats at query time since we do not want to
 %% reveal anything about other vhosts to unprivileged users.
 
--record(state, {
-          %% "stats" for which no calculations are required
-          tables,
-          %% database of aggregated samples
-          aggregated_stats,
-          %% index for detailed aggregated_stats that have 2-tuple keys
-          aggregated_stats_index,
-          %% What the previous info item was for any given
-          %% {queue/channel/connection}
-          old_stats,
-          gc_timer,
-          gc_next_key,
-          lookups,
-          interval,
-          event_refresh_ref,
-          rates_mode}).
-
--define(FINE_STATS_TYPES, [channel_queue_stats, channel_exchange_stats,
-                           channel_queue_exchange_stats]).
--define(TABLES, [queue_stats, connection_stats, channel_stats,
-                 consumers_by_queue, consumers_by_channel,
-                 node_stats, node_node_stats]).
-
--define(DELIVER_GET, [deliver, deliver_no_ack, get, get_no_ack]).
--define(FINE_STATS, [publish, publish_in, publish_out,
-                     ack, deliver_get, confirm, return_unroutable, redeliver] ++
-            ?DELIVER_GET).
-
-%% Most come from channels as fine stats, but queues emit these directly.
--define(QUEUE_MSG_RATES, [disk_reads, disk_writes]).
-
--define(MSG_RATES, ?FINE_STATS ++ ?QUEUE_MSG_RATES).
-
--define(QUEUE_MSG_COUNTS, [messages, messages_ready, messages_unacknowledged]).
-
--define(COARSE_NODE_STATS,
-        [mem_used, fd_used, sockets_used, proc_used, disk_free,
-         io_read_count,  io_read_bytes,  io_read_avg_time,
-         io_write_count, io_write_bytes, io_write_avg_time,
-         io_sync_count,  io_sync_avg_time,
-         io_seek_count,  io_seek_avg_time,
-         io_reopen_count, mnesia_ram_tx_count,  mnesia_disk_tx_count,
-         msg_store_read_count, msg_store_write_count,
-         queue_index_journal_write_count,
-         queue_index_write_count, queue_index_read_count]).
-
--define(COARSE_NODE_NODE_STATS, [send_bytes, recv_bytes]).
-
-%% Normally 0 and no history means "has never happened, don't
-%% report". But for these things we do want to report even at 0 with
-%% no history.
--define(ALWAYS_REPORT_STATS,
-        [io_read_avg_time, io_write_avg_time,
-         io_sync_avg_time, sockets_used | ?QUEUE_MSG_COUNTS]).
-
--define(COARSE_CONN_STATS, [recv_oct, send_oct]).
-
--define(GC_INTERVAL, 5000).
--define(GC_MIN_ROWS, 100).
--define(GC_MIN_RATIO, 0.01).
-
--define(DROP_LENGTH, 1000).
-
-prioritise_cast({event, #event{type  = Type,
-                               props = Props}}, Len, _State)
-  when (Type =:= channel_stats orelse
-        Type =:= queue_stats) andalso Len > ?DROP_LENGTH ->
-    case pget(idle_since, Props) of
-        unknown -> drop;
-        _       -> 0
-    end;
-prioritise_cast(_Msg, _Len, _State) ->
-    0.
-
-%% We want timely replies to queries even when overloaded, so return 5
-%% as priority. Also we only have access to the queue length here, not
-%% in handle_call/3, so stash it in the dictionary. This is a bit ugly
-%% but better than fiddling with gen_server2 even more.
-prioritise_call(_Msg, _From, Len, _State) ->
-    put(last_queue_length, Len),
-    5.
-
 %%----------------------------------------------------------------------------
 %% API
 %%----------------------------------------------------------------------------
 
 start_link() ->
-    Ref = make_ref(),
-    case gen_server2:start_link({global, ?MODULE}, ?MODULE, [Ref], []) of
+    case gen_server2:start_link({global, ?MODULE}, ?MODULE, [], []) of
         {ok, Pid} -> register(?MODULE, Pid), %% [1]
-                     rabbit:force_event_refresh(Ref),
                      {ok, Pid};
         Else      -> Else
     end.
@@ -242,9 +152,6 @@ get_all_consumers(V)        -> safe_call({get_all_consumers, V}).
 get_overview(User, R)       -> safe_call({get_overview, User, R}).
 get_overview(R)             -> safe_call({get_overview, all, R}).
 
-override_lookups(Lookups)   -> safe_call({override_lookups, Lookups}).
-reset_lookups()             -> safe_call(reset_lookups).
-
 safe_call(Term)          -> safe_call(Term, []).
 safe_call(Term, Default) -> safe_call(Term, Default, 1).
 
@@ -264,91 +171,82 @@ safe_call(Term, Default, Retries) ->
 %% Internal, gen_server2 callbacks
 %%----------------------------------------------------------------------------
 
-init([Ref]) ->
+-record(state, {interval}).
+
+init([]) ->
     %% When Rabbit is overloaded, it's usually especially important
     %% that the management plugin work.
     process_flag(priority, high),
     {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
-    {ok, RatesMode} = application:get_env(rabbitmq_management, rates_mode),
-    rabbit_node_monitor:subscribe(self()),
     rabbit_log:info("Statistics database started.~n"),
-    Table = fun () -> ets:new(rabbit_mgmt_db, [ordered_set]) end,
-    Tables = orddict:from_list([{Key, Table()} || Key <- ?TABLES]),
-    {ok, set_gc_timer(
-           reset_lookups(
-             #state{interval               = Interval,
-                    tables                 = Tables,
-                    old_stats              = Table(),
-                    aggregated_stats       = Table(),
-                    aggregated_stats_index = Table(),
-                    event_refresh_ref      = Ref,
-                    rates_mode             = RatesMode})), hibernate,
+    {ok, #state{interval = Interval}, hibernate,
      {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
 
-handle_call({augment_exchanges, Xs, Ranges, basic}, _From, State) ->
-    reply(list_exchange_stats(Ranges, Xs, State), State);
+handle_call({augment_exchanges, Xs, Ranges, basic}, _From,
+            #state{interval = Interval} = State) ->
+    reply(list_exchange_stats(Ranges, Xs, Interval), State);
 
-handle_call({augment_exchanges, Xs, Ranges, full}, _From, State) ->
-    reply(detail_exchange_stats(Ranges, Xs, State), State);
+handle_call({augment_exchanges, Xs, Ranges, full}, _From,
+            #state{interval = Interval} = State) ->
+    reply(detail_exchange_stats(Ranges, Xs, Interval), State);
 
-handle_call({augment_queues, Qs, Ranges, basic}, _From, State) ->
-    reply(list_queue_stats(Ranges, Qs, State), State);
+handle_call({augment_queues, Qs, Ranges, basic}, _From,
+            #state{interval = Interval} = State) ->
+    reply(list_queue_stats(Ranges, Qs, Interval), State);
 
-handle_call({augment_queues, Qs, Ranges, full}, _From, State) ->
-    reply(detail_queue_stats(Ranges, Qs, State), State);
+handle_call({augment_queues, Qs, Ranges, full}, _From,
+            #state{interval = Interval} = State) ->
+    reply(detail_queue_stats(Ranges, Qs, Interval), State);
 
-handle_call({augment_vhosts, VHosts, Ranges}, _From, State) ->
-    reply(vhost_stats(Ranges, VHosts, State), State);
+handle_call({augment_vhosts, VHosts, Ranges}, _From,
+            #state{interval = Interval} = State) ->
+    reply(vhost_stats(Ranges, VHosts, Interval), State);
 
-handle_call({augment_nodes, Nodes, Ranges}, _From, State) ->
-    {reply, node_stats(Ranges, Nodes, State), State};
+handle_call({augment_nodes, Nodes, Ranges}, _From,
+            #state{interval = Interval} = State) ->
+    {reply, node_stats(Ranges, Nodes, Interval), State};
 
 handle_call({get_channel, Name, Ranges}, _From,
-            State = #state{tables = Tables}) ->
-    case created_event(Name, channel_stats, Tables) of
+            #state{interval = Interval} = State) ->
+    case created_event(Name, channel_stats) of
         not_found -> reply(not_found, State);
-        Ch        -> [Result] = detail_channel_stats(Ranges, [Ch], State),
+        Ch        -> [Result] = detail_channel_stats(Ranges, [Ch], Interval),
                      reply(Result, State)
     end;
 
 handle_call({get_connection, Name, Ranges}, _From,
-            State = #state{tables = Tables}) ->
-    case created_event(Name, connection_stats, Tables) of
+            #state{interval = Interval} = State) ->
+    case created_event(Name, connection_stats) of
         not_found -> reply(not_found, State);
-        Conn      -> [Result] = connection_stats(Ranges, [Conn], State),
+        Conn      -> [Result] = connection_stats(Ranges, [Conn], Interval),
                      reply(Result, State)
     end;
 
 handle_call({get_all_channels, Ranges}, _From,
-            State = #state{tables = Tables}) ->
-    Chans = created_events(channel_stats, Tables),
-    reply(list_channel_stats(Ranges, Chans, State), State);
+            #state{interval = Interval} = State) ->
+    Chans = created_events(channel_stats),
+    reply(list_channel_stats(Ranges, Chans, Interval), State);
 
 handle_call({get_all_connections, Ranges}, _From,
-            State = #state{tables = Tables}) ->
-    Conns = created_events(connection_stats, Tables),
-    reply(connection_stats(Ranges, Conns, State), State);
-
-handle_call({get_all_consumers, VHost},
-            _From, State = #state{tables = Tables}) ->
-    All = ets:tab2list(orddict:fetch(consumers_by_queue, Tables)),
-    {reply, [augment_msg_stats(
-               augment_consumer(Obj), State) ||
-                {{#resource{virtual_host = VHostC}, _Ch, _CTag}, Obj} <- All,
-                VHost =:= all orelse VHost =:= VHostC], State};
+            #state{interval = Interval} = State) ->
+    Conns = created_events(connection_stats),
+    reply(connection_stats(Ranges, Conns, Interval), State);
+
+handle_call({get_all_consumers, VHost}, _From, State) ->
+    {reply, [augment_msg_stats(augment_consumer(Obj)) ||
+                Obj <- consumers_by_queue_and_vhost(VHost)], State};
 
 handle_call({get_overview, User, Ranges}, _From,
-            State = #state{tables = Tables}) ->
+            #state{interval = Interval} = State) ->
     VHosts = case User of
                  all -> rabbit_vhost:list();
                  _   -> rabbit_mgmt_util:list_visible_vhosts(User)
              end,
     %% TODO: there's no reason we can't do an overview of send_oct and
     %% recv_oct now!
-    VStats = [read_simple_stats(vhost_stats, VHost, State) ||
-                 VHost <- VHosts],
-    MessageStats = [overview_sum(Type, VStats) || Type <- ?MSG_RATES],
-    QueueStats = [overview_sum(Type, VStats) || Type <- ?QUEUE_MSG_COUNTS],
+    MessageStats = [overview_sum(Type, VHosts) ||
+                       Type <- [fine_stats, deliver_get, queue_msg_rates]],
+    QueueStats = [overview_sum(queue_msg_counts, VHosts)],
     F = case User of
             all -> fun (L) -> length(L) end;
             _   -> fun (L) -> length(rabbit_mgmt_util:filter_user(L, User)) end
@@ -356,8 +254,7 @@ handle_call({get_overview, User, Ranges}, _From,
     %% Filtering out the user's consumers would be rather expensive so let's
     %% just not show it
     Consumers = case User of
-                    all -> Table = orddict:fetch(consumers_by_queue, Tables),
-                           [{consumers, ets:info(Table, size)}];
+                    all -> [{consumers, ets:info(consumers_by_queue, size)}];
                     _   -> []
                 end,
     ObjectTotals = Consumers ++
@@ -365,52 +262,24 @@ handle_call({get_overview, User, Ranges}, _From,
                                     Q <- rabbit_amqqueue:list(V)])},
          {exchanges,   length([X || V <- VHosts,
                                     X <- rabbit_exchange:list(V)])},
-         {connections, F(created_events(connection_stats, Tables))},
-         {channels,    F(created_events(channel_stats, Tables))}],
-    reply([{message_stats, format_samples(Ranges, MessageStats, State)},
-           {queue_totals,  format_samples(Ranges, QueueStats, State)},
+         {connections, F(created_events(connection_stats))},
+         {channels,    F(created_events(channel_stats))}],
+    FormatMessage = format_samples(Ranges, MessageStats, Interval),
+    FormatQueue = format_samples(Ranges, QueueStats, Interval),
+    [rabbit_mgmt_stats:free(S) || {S, _, _} <- MessageStats],
+    [rabbit_mgmt_stats:free(S) || {S, _, _} <- QueueStats],
+    reply([{message_stats, FormatMessage},
+           {queue_totals,  FormatQueue},
            {object_totals, ObjectTotals},
-           {statistics_db_event_queue, get(last_queue_length)}], State);
-
-handle_call({override_lookups, Lookups}, _From, State) ->
-    reply(ok, State#state{lookups = Lookups});
-
-handle_call(reset_lookups, _From, State) ->
-    reply(ok, reset_lookups(State));
-
-%% Used in rabbit_mgmt_test_db where we need guarantees events have
-%% been handled before querying
-handle_call({event, Event = #event{reference = none}}, _From, State) ->
-    handle_event(Event, State),
-    reply(ok, State);
+           {statistics_db_event_queue, event_queue()}],
+          State);
 
 handle_call(_Request, _From, State) ->
     reply(not_understood, State).
 
-%% Only handle events that are real, or pertain to a force-refresh
-%% that we instigated.
-handle_cast({event, Event = #event{reference = none}}, State) ->
-    handle_event(Event, State),
-    noreply(State);
-
-handle_cast({event, Event = #event{reference = Ref}},
-            State = #state{event_refresh_ref = Ref}) ->
-    handle_event(Event, State),
-    noreply(State);
-
 handle_cast(_Request, State) ->
     noreply(State).
 
-handle_info(gc, State) ->
-    noreply(set_gc_timer(gc_batch(State)));
-
-handle_info({node_down, Node}, State = #state{tables = Tables}) ->
-    Conns = created_events(connection_stats, Tables),
-    Chs = created_events(channel_stats, Tables),
-    delete_all_from_node(connection_closed, Node, Conns, State),
-    delete_all_from_node(channel_closed, Node, Chs, State),
-    noreply(State);
-
 handle_info(_Info, State) ->
     noreply(State).
 
@@ -423,14 +292,6 @@ code_change(_OldVsn, State, _Extra) ->
 reply(Reply, NewState) -> {reply, Reply, NewState, hibernate}.
 noreply(NewState) -> {noreply, NewState, hibernate}.
 
-set_gc_timer(State) ->
-    TRef = erlang:send_after(?GC_INTERVAL, self(), gc),
-    State#state{gc_timer = TRef}.
-
-reset_lookups(State) ->
-    State#state{lookups = [{exchange, fun rabbit_exchange:lookup/1},
-                           {queue,    fun rabbit_amqqueue:lookup/1}]}.
-
 handle_pre_hibernate(State) ->
     %% rabbit_event can end up holding on to some memory after a busy
     %% workout, but it's not a gen_server so we can't make it
@@ -443,12 +304,6 @@ handle_pre_hibernate(State) ->
 
 format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
 
-delete_all_from_node(Type, Node, Items, State) ->
-    [case node(Pid) of
-         Node -> handle_event(#event{type = Type, props = [{pid, Pid}]}, State);
-         _    -> ok
-     end || Item <- Items, Pid <- [pget(pid, Item)]].
-
 %%----------------------------------------------------------------------------
 %% Internal, utilities
 %%----------------------------------------------------------------------------
@@ -484,503 +339,142 @@ lookup_element(Table, Key, Pos) ->
     catch error:badarg -> []
     end.
 
-fine_stats_id(ChPid, {Q, X}) -> {ChPid, Q, X};
-fine_stats_id(ChPid, QorX)   -> {ChPid, QorX}.
-
-floor(TS, #state{interval = Interval}) ->
-    rabbit_mgmt_util:floor(TS, Interval).
-ceil(TS, #state{interval = Interval}) ->
-    rabbit_mgmt_util:ceil (TS, Interval).
-
-details_key(Key) -> list_to_atom(atom_to_list(Key) ++ "_details").
-
-%%----------------------------------------------------------------------------
-%% Internal, event-receiving side
-%%----------------------------------------------------------------------------
-
-handle_event(#event{type = queue_stats, props = Stats, timestamp = Timestamp},
-             State) ->
-    handle_stats(queue_stats, Stats, Timestamp,
-                 [{fun rabbit_mgmt_format:properties/1,[backing_queue_status]},
-                  {fun rabbit_mgmt_format:now_to_str/1, [idle_since]},
-                  {fun rabbit_mgmt_format:queue_state/1, [state]}],
-                 ?QUEUE_MSG_COUNTS, ?QUEUE_MSG_RATES, State);
-
-handle_event(Event = #event{type = queue_deleted,
-                            props = [{name, Name}],
-                            timestamp = Timestamp},
-             State = #state{old_stats = OldTable}) ->
-    delete_consumers(Name, consumers_by_queue, consumers_by_channel, State),
-    %% This is fiddly. Unlike for connections and channels, we need to
-    %% decrease any amalgamated coarse stats for [messages,
-    %% messages_ready, messages_unacknowledged] for this queue - since
-    %% the queue's deletion means we have really got rid of messages!
-    Id = {coarse, {queue_stats, Name}},
-    %% This ceil must correspond to the ceil in append_samples/5
-    TS = ceil(Timestamp, State),
-    OldStats = lookup_element(OldTable, Id),
-    [record_sample(Id, {Key, -pget(Key, OldStats, 0), TS, State}, true, State)
-     || Key <- ?QUEUE_MSG_COUNTS],
-    delete_samples(channel_queue_stats,  {'_', Name}, State),
-    delete_samples(queue_exchange_stats, {Name, '_'}, State),
-    delete_samples(queue_stats,          Name,        State),
-    handle_deleted(queue_stats, Event, State);
-
-handle_event(Event = #event{type = exchange_deleted,
-                            props = [{name, Name}]}, State) ->
-    delete_samples(channel_exchange_stats,  {'_', Name}, State),
-    delete_samples(queue_exchange_stats,    {'_', Name}, State),
-    delete_samples(exchange_stats,          Name,        State),
-    handle_deleted(exchange_stats, Event, State);
-
-handle_event(#event{type = vhost_deleted,
-                    props = [{name, Name}]}, State) ->
-    delete_samples(vhost_stats, Name, State);
-
-handle_event(#event{type = connection_created, props = Stats}, State) ->
-    handle_created(
-      connection_stats, Stats,
-      [{fun rabbit_mgmt_format:addr/1,         [host, peer_host]},
-       {fun rabbit_mgmt_format:port/1,         [port, peer_port]},
-       {fun rabbit_mgmt_format:protocol/1,     [protocol]},
-       {fun rabbit_mgmt_format:amqp_table/1,   [client_properties]}], State);
-
-handle_event(#event{type = connection_stats, props = Stats,
-                    timestamp = Timestamp},
-             State) ->
-    handle_stats(connection_stats, Stats, Timestamp, [], ?COARSE_CONN_STATS,
-                 State);
-
-handle_event(Event = #event{type  = connection_closed,
-                            props = [{pid, Pid}]}, State) ->
-    delete_samples(connection_stats, Pid, State),
-    handle_deleted(connection_stats, Event, State);
-
-handle_event(#event{type = channel_created, props = Stats}, State) ->
-    handle_created(channel_stats, Stats, [], State);
-
-handle_event(#event{type = channel_stats, props = Stats, timestamp = Timestamp},
-             State = #state{old_stats = OldTable}) ->
-    handle_stats(channel_stats, Stats, Timestamp,
-                 [{fun rabbit_mgmt_format:now_to_str/1, [idle_since]}],
-                 [], State),
-    ChPid = id(channel_stats, Stats),
-    AllStats = [old_fine_stats(Type, Stats, State)
-                || Type <- ?FINE_STATS_TYPES],
-    ets:match_delete(OldTable, {{fine, {ChPid, '_'}},      '_'}),
-    ets:match_delete(OldTable, {{fine, {ChPid, '_', '_'}}, '_'}),
-    [handle_fine_stats(Timestamp, AllStatsElem, State)
-     || AllStatsElem <- AllStats];
-
-handle_event(Event = #event{type = channel_closed,
-                            props = [{pid, Pid}]},
-             State = #state{old_stats = Old}) ->
-    delete_consumers(Pid, consumers_by_channel, consumers_by_queue, State),
-    delete_samples(channel_queue_stats,    {Pid, '_'}, State),
-    delete_samples(channel_exchange_stats, {Pid, '_'}, State),
-    delete_samples(channel_stats,          Pid,        State),
-    handle_deleted(channel_stats, Event, State),
-    ets:match_delete(Old, {{fine, {Pid, '_'}},      '_'}),
-    ets:match_delete(Old, {{fine, {Pid, '_', '_'}}, '_'});
-
-handle_event(#event{type = consumer_created, props = Props}, State) ->
-    Fmt = [{fun rabbit_mgmt_format:amqp_table/1, [arguments]}],
-    handle_consumer(fun(Table, Id, P0) ->
-                            P = rabbit_mgmt_format:format(P0, Fmt),
-                            ets:insert(Table, {Id, P})
-                    end,
-                    Props, State);
-
-handle_event(#event{type = consumer_deleted, props = Props}, State) ->
-    handle_consumer(fun(Table, Id, _P) -> ets:delete(Table, Id) end,
-                    Props, State);
-
-%% TODO: we don't clear up after dead nodes here - this is a very tiny
-%% leak every time a node is permanently removed from the cluster. Do
-%% we care?
-handle_event(#event{type = node_stats, props = Stats0, timestamp = Timestamp},
-             State) ->
-    Stats = proplists:delete(persister_stats, Stats0) ++
-        pget(persister_stats, Stats0),
-    handle_stats(node_stats, Stats, Timestamp, [], ?COARSE_NODE_STATS, State);
-
-handle_event(#event{type = node_node_stats, props = Stats,
-                    timestamp = Timestamp}, State) ->
-    handle_stats(node_node_stats, Stats, Timestamp, [], ?COARSE_NODE_NODE_STATS,
-                 State);
-
-handle_event(Event = #event{type  = node_node_deleted,
-                            props = [{route, Route}]}, State) ->
-    delete_samples(node_node_stats, Route, State),
-    handle_deleted(node_node_stats, Event, State);
-
-handle_event(_Event, _State) ->
-    ok.
-
-handle_created(TName, Stats, Funs, State = #state{tables = Tables}) ->
-    Formatted = rabbit_mgmt_format:format(Stats, Funs),
-    ets:insert(orddict:fetch(TName, Tables), {{id(TName, Stats), create},
-                                              Formatted,
-                                              pget(name, Stats)}),
-    {ok, State}.
-
-handle_stats(TName, Stats, Timestamp, Funs, RatesKeys, State) ->
-    handle_stats(TName, Stats, Timestamp, Funs, RatesKeys, [], State).
-
-handle_stats(TName, Stats, Timestamp, Funs, RatesKeys, NoAggRatesKeys,
-             State = #state{tables = Tables, old_stats = OldTable}) ->
-    Id = id(TName, Stats),
-    IdSamples = {coarse, {TName, Id}},
-    OldStats = lookup_element(OldTable, IdSamples),
-    append_samples(
-      Stats, Timestamp, OldStats, IdSamples, RatesKeys, true, State),
-    append_samples(
-      Stats, Timestamp, OldStats, IdSamples, NoAggRatesKeys, false, State),
-    StripKeys = [id_name(TName)] ++ RatesKeys ++ ?FINE_STATS_TYPES,
-    Stats1 = [{K, V} || {K, V} <- Stats, not lists:member(K, StripKeys)],
-    Stats2 = rabbit_mgmt_format:format(Stats1, Funs),
-    ets:insert(orddict:fetch(TName, Tables), {{Id, stats}, Stats2, Timestamp}),
-    {ok, State}.
-
-handle_deleted(TName, #event{props = Props}, State = #state{tables    = Tables,
-                                                            old_stats = Old}) ->
-    Id = id(TName, Props),
-    case orddict:find(TName, Tables) of
-        {ok, Table} -> ets:delete(Table, {Id, create}),
-                       ets:delete(Table, {Id, stats});
-        error       -> ok
-    end,
-    ets:delete(Old, {coarse, {TName, Id}}),
-    {ok, State}.
-
-handle_consumer(Fun, Props, State = #state{tables = Tables}) ->
-    P = rabbit_mgmt_format:format(Props, []),
-    CTag = pget(consumer_tag, P),
-    Q    = pget(queue,        P),
-    Ch   = pget(channel,      P),
-    QTable  = orddict:fetch(consumers_by_queue,   Tables),
-    ChTable = orddict:fetch(consumers_by_channel, Tables),
-    Fun(QTable,  {Q, Ch, CTag}, P),
-    Fun(ChTable, {Ch, Q, CTag}, P),
-    {ok, State}.
-
-%% The consumer_deleted event is emitted by queues themselves -
-%% therefore in the event that a queue dies suddenly we may not get
-%% it. The best way to handle this is to make sure we also clean up
-%% consumers when we hear about any queue going down.
-delete_consumers(PrimId, PrimTableName, SecTableName,
-                 #state{tables = Tables}) ->
-    Table1 = orddict:fetch(PrimTableName, Tables),
-    Table2 = orddict:fetch(SecTableName, Tables),
-    SecIdCTags = ets:match(Table1, {{PrimId, '$1', '$2'}, '_'}),
-    ets:match_delete(Table1, {{PrimId, '_', '_'}, '_'}),
-    [ets:delete(Table2, {SecId, PrimId, CTag}) || [SecId, CTag] <- SecIdCTags].
-
-old_fine_stats(Type, Props, #state{old_stats = Old}) ->
-    case pget(Type, Props) of
-        unknown       -> ignore;
-        AllFineStats0 -> ChPid = id(channel_stats, Props),
-                         [begin
-                              Id = fine_stats_id(ChPid, Ids),
-                              {Id, Stats, lookup_element(Old, {fine, Id})}
-                          end || {Ids, Stats} <- AllFineStats0]
-    end.
-
-handle_fine_stats(_Timestamp, ignore, _State) ->
-    ok;
-
-handle_fine_stats(Timestamp, AllStats, State) ->
-    [handle_fine_stat(Id, Stats, Timestamp, OldStats, State) ||
-        {Id, Stats, OldStats} <- AllStats].
-
-handle_fine_stat(Id, Stats, Timestamp, OldStats, State) ->
-    Total = lists:sum([V || {K, V} <- Stats, lists:member(K, ?DELIVER_GET)]),
-    Stats1 = case Total of
-                 0 -> Stats;
-                 _ -> [{deliver_get, Total}|Stats]
-             end,
-    append_samples(Stats1, Timestamp, OldStats, {fine, Id}, all, true, State).
-
-delete_samples(Type, {Id, '_'}, State) ->
-    delete_samples_with_index(Type, Id, fun forward/2, State);
-delete_samples(Type, {'_', Id}, State) ->
-    delete_samples_with_index(Type, Id, fun reverse/2, State);
-delete_samples(Type, Id, #state{aggregated_stats = ETS}) ->
-    ets:match_delete(ETS, delete_match(Type, Id)).
-
-delete_samples_with_index(Type, Id, Order,
-                          #state{aggregated_stats       = ETS,
-                                 aggregated_stats_index = ETSi}) ->
-    Ids2 = lists:append(ets:match(ETSi, {{Type, Id, '$1'}})),
-    ets:match_delete(ETSi, {{Type, Id, '_'}}),
-    [begin
-         ets:match_delete(ETS, delete_match(Type, Order(Id, Id2))),
-         ets:match_delete(ETSi, {{Type, Id2, Id}})
-     end || Id2 <- Ids2].
-
-forward(A, B) -> {A, B}.
-reverse(A, B) -> {B, A}.
-
-delete_match(Type, Id) -> {{{Type, Id}, '_'}, '_'}.
-
-append_samples(Stats, TS, OldStats, Id, Keys, Agg,
-               State = #state{old_stats = OldTable}) ->
-    case ignore_coarse_sample(Id, State) of
-        false ->
-            %% This ceil must correspond to the ceil in handle_event
-            %% queue_deleted
-            NewMS = ceil(TS, State),
-            case Keys of
-                all -> [append_sample(K, V, NewMS, OldStats, Id, Agg, State)
-                        || {K, V} <- Stats];
-                _   -> [append_sample(K, V, NewMS, OldStats, Id, Agg, State)
-                        || K <- Keys,
-                           V <- [pget(K, Stats)],
-                           V =/= 0 orelse lists:member(K, ?ALWAYS_REPORT_STATS)]
-            end,
-            ets:insert(OldTable, {Id, Stats});
-        true ->
-            ok
-    end.
-
-append_sample(Key, Val, NewMS, OldStats, Id, Agg, State) when is_number(Val) ->
-    OldVal = case pget(Key, OldStats, 0) of
-        N when is_number(N) -> N;
-        _                   -> 0
-    end,
-    record_sample(Id, {Key, Val - OldVal, NewMS, State}, Agg, State),
-    ok;
-append_sample(_Key, _Value, _NewMS, _OldStats, _Id, _Agg, _State) ->
-    ok.
-
-ignore_coarse_sample({coarse, {queue_stats, Q}}, State) ->
-    not object_exists(Q, State);
-ignore_coarse_sample(_, _) ->
-    false.
-
-%% Node stats do not have a vhost of course
-record_sample({coarse, {node_stats, _Node} = Id}, Args, true, _State) ->
-    record_sample0(Id, Args);
-
-record_sample({coarse, {node_node_stats, _Names} = Id}, Args, true, _State) ->
-    record_sample0(Id, Args);
-
-record_sample({coarse, Id}, Args, false, _State) ->
-    record_sample0(Id, Args);
-
-record_sample({coarse, Id}, Args, true, State) ->
-    record_sample0(Id, Args),
-    record_sample0({vhost_stats, vhost(Id, State)}, Args);
-
-%% Deliveries / acks (Q -> Ch)
-record_sample({fine, {Ch, Q = #resource{kind = queue}}}, Args, true, State) ->
-    case object_exists(Q, State) of
-        true  -> record_sample0({channel_queue_stats, {Ch, Q}}, Args),
-                 record_sample0({queue_stats,         Q},       Args);
-        false -> ok
-    end,
-    record_sample0({channel_stats, Ch},       Args),
-    record_sample0({vhost_stats,   vhost(Q)}, Args);
-
-%% Publishes / confirms (Ch -> X)
-record_sample({fine, {Ch, X = #resource{kind = exchange}}}, Args, true,State) ->
-    case object_exists(X, State) of
-        true  -> record_sample0({channel_exchange_stats, {Ch, X}}, Args),
-                 record_sampleX(publish_in,              X,        Args);
-        false -> ok
-    end,
-    record_sample0({channel_stats, Ch},       Args),
-    record_sample0({vhost_stats,   vhost(X)}, Args);
-
-%% Publishes (but not confirms) (Ch -> X -> Q)
-record_sample({fine, {_Ch,
-                      Q = #resource{kind = queue},
-                      X = #resource{kind = exchange}}}, Args, true, State) ->
-    %% TODO This one logically feels like it should be here. It would
-    %% correspond to "publishing channel message rates to queue" -
-    %% which would be nice to handle - except we don't. And just
-    %% uncommenting this means it gets merged in with "consuming
-    %% channel delivery from queue" - which is not very helpful.
-    %% record_sample0({channel_queue_stats, {Ch, Q}}, Args),
-    QExists = object_exists(Q, State),
-    XExists = object_exists(X, State),
-    case QExists of
-        true  -> record_sample0({queue_stats,          Q},       Args);
-        false -> ok
-    end,
-    case QExists andalso XExists of
-        true  -> record_sample0({queue_exchange_stats, {Q,  X}}, Args);
-        false -> ok
-    end,
-    case XExists of
-        true  -> record_sampleX(publish_out,           X,        Args);
-        false -> ok
-    end.
-
-%% We have to check the queue and exchange objects still exist since
-%% their deleted event could be overtaken by a channel stats event
-%% which contains fine stats referencing them. That's also why we
-%% don't need to check the channels exist - their deleted event can't
-%% be overtaken by their own last stats event.
-%%
-%% Also, sometimes the queue_deleted event is not emitted by the queue
-%% (in the nodedown case) - so it can overtake the final queue_stats
-%% event (which is not *guaranteed* to be lost). So we make a similar
-%% check for coarse queue stats.
-%%
-%% We can be sure that mnesia will be up to date by the time we receive
-%% the event (even though we dirty read) since the deletions are
-%% synchronous and we do not emit the deleted event until after the
-%% deletion has occurred.
-object_exists(Name = #resource{kind = Kind}, #state{lookups = Lookups}) ->
-    case (pget(Kind, Lookups))(Name) of
-        {ok, _} -> true;
-        _       -> false
-    end.
-
-vhost(#resource{virtual_host = VHost}) -> VHost.
-
-vhost({queue_stats, #resource{virtual_host = VHost}}, _State) ->
-    VHost;
-vhost({TName, Pid}, #state{tables = Tables}) ->
-    Table = orddict:fetch(TName, Tables),
-    pget(vhost, lookup_element(Table, {Pid, create})).
-
-%% exchanges have two sets of "publish" stats, so rearrange things a touch
-record_sampleX(RenamePublishTo, X, {publish, Diff, TS, State}) ->
-    record_sample0({exchange_stats, X}, {RenamePublishTo, Diff, TS, State});
-record_sampleX(_RenamePublishTo, X, {Type, Diff, TS, State}) ->
-    record_sample0({exchange_stats, X}, {Type, Diff, TS, State}).
-
-%% Ignore case where ID1 and ID2 are in a tuple, i.e. detailed stats,
-%% when in basic mode
-record_sample0({Type, {_ID1, _ID2}}, {_, _, _, #state{rates_mode = basic}})
-  when Type =/= node_node_stats ->
-    ok;
-record_sample0(Id0, {Key, Diff, TS, #state{aggregated_stats       = ETS,
-                                           aggregated_stats_index = ETSi}}) ->
-    Id = {Id0, Key},
-    Old = case lookup_element(ETS, Id) of
-              [] -> case Id0 of
-                        {Type, {Id1, Id2}} ->
-                            ets:insert(ETSi, {{Type, Id2, Id1}}),
-                            ets:insert(ETSi, {{Type, Id1, Id2}});
-                        _ ->
-                            ok
-                    end,
-                    rabbit_mgmt_stats:blank();
-              E  -> E
-          end,
-    ets:insert(ETS, {Id, rabbit_mgmt_stats:record(TS, Diff, Old)}).
-
 %%----------------------------------------------------------------------------
 %% Internal, querying side
 %%----------------------------------------------------------------------------
 
 -define(QUEUE_DETAILS,
         {queue_stats, [{incoming,   queue_exchange_stats, fun first/1},
-                       {deliveries, channel_queue_stats,  fun second/1}]}).
+                       {deliveries, channel_queue_stats, fun second/1}]}).
 
 -define(EXCHANGE_DETAILS,
         {exchange_stats, [{incoming, channel_exchange_stats, fun second/1},
-                          {outgoing, queue_exchange_stats,   fun second/1}]}).
+                          {outgoing, queue_exchange_stats, fun second/1}]}).
 
 -define(CHANNEL_DETAILS,
         {channel_stats, [{publishes,  channel_exchange_stats, fun first/1},
-                         {deliveries, channel_queue_stats,    fun first/1}]}).
+                         {deliveries, channel_queue_stats, fun first/1}]}).
 
 -define(NODE_DETAILS,
         {node_stats, [{cluster_links, node_node_stats, fun first/1}]}).
 
-first(Id)  -> {Id, '$1'}.
-second(Id) -> {'$1', Id}.
+first(Id)  ->
+    {Id, '_'}.
+second(Id) ->
+    {'_', Id}.
 
-list_queue_stats(Ranges, Objs, State) ->
+list_queue_stats(Ranges, Objs, Interval) ->
     adjust_hibernated_memory_use(
-      merge_stats(Objs, queue_funs(Ranges, State))).
+      merge_queue_stats(Objs, queue_funs(Ranges, Interval))).
 
-detail_queue_stats(Ranges, Objs, State) ->
+detail_queue_stats(Ranges, Objs, Interval) ->
     adjust_hibernated_memory_use(
-      merge_stats(Objs, [consumer_details_fun(
+      merge_queue_stats(Objs,
+                        [consumer_details_fun(
                            fun (Props) -> id_lookup(queue_stats, Props) end,
-                           consumers_by_queue, State),
-                         detail_stats_fun(Ranges, ?QUEUE_DETAILS, State)
-                         | queue_funs(Ranges, State)])).
-
-queue_funs(Ranges, State) ->
-    [basic_stats_fun(queue_stats, State),
-     simple_stats_fun(Ranges, queue_stats, State),
-     augment_msg_stats_fun(State)].
-
-list_exchange_stats(Ranges, Objs, State) ->
-    merge_stats(Objs, [simple_stats_fun(Ranges, exchange_stats, State),
-                       augment_msg_stats_fun(State)]).
-
-detail_exchange_stats(Ranges, Objs, State) ->
-    merge_stats(Objs, [simple_stats_fun(Ranges, exchange_stats, State),
-                       detail_stats_fun(Ranges, ?EXCHANGE_DETAILS, State),
-                       augment_msg_stats_fun(State)]).
-
-connection_stats(Ranges, Objs, State) ->
-    merge_stats(Objs, [basic_stats_fun(connection_stats, State),
-                       simple_stats_fun(Ranges, connection_stats, State),
-                       augment_msg_stats_fun(State)]).
-
-list_channel_stats(Ranges, Objs, State) ->
-    merge_stats(Objs, [basic_stats_fun(channel_stats, State),
-                       simple_stats_fun(Ranges, channel_stats, State),
-                       augment_msg_stats_fun(State)]).
-
-detail_channel_stats(Ranges, Objs, State) ->
-    merge_stats(Objs, [basic_stats_fun(channel_stats, State),
-                       simple_stats_fun(Ranges, channel_stats, State),
+                           consumers_by_queue),
+                         detail_stats_fun(Ranges, ?QUEUE_DETAILS, Interval)
+                         | queue_funs(Ranges, Interval)])).
+
+queue_funs(Ranges, Interval) ->
+    [basic_stats_fun(queue_stats),
+     simple_stats_fun(Ranges, queue_stats, Interval),
+     augment_queue_msg_stats_fun()].
+
+list_exchange_stats(Ranges, Objs, Interval) ->
+    merge_stats(Objs, [simple_stats_fun(Ranges, exchange_stats, Interval),
+                       augment_msg_stats_fun()]).
+
+detail_exchange_stats(Ranges, Objs, Interval) ->
+    merge_stats(Objs, [simple_stats_fun(Ranges, exchange_stats, Interval),
+                       detail_stats_fun(Ranges, ?EXCHANGE_DETAILS, Interval),
+                       augment_msg_stats_fun()]).
+
+connection_stats(Ranges, Objs, Interval) ->
+    merge_stats(Objs, [basic_stats_fun(connection_stats),
+                       simple_stats_fun(Ranges, connection_stats, Interval),
+                       augment_msg_stats_fun()]).
+
+list_channel_stats(Ranges, Objs, Interval) ->
+    merge_stats(Objs, [basic_stats_fun(channel_stats),
+                       simple_stats_fun(Ranges, channel_stats, Interval),
+                       augment_msg_stats_fun()]).
+
+detail_channel_stats(Ranges, Objs, Interval) ->
+    merge_stats(Objs, [basic_stats_fun(channel_stats),
+                       simple_stats_fun(Ranges, channel_stats, Interval),
                        consumer_details_fun(
                          fun (Props) -> pget(pid, Props) end,
-                         consumers_by_channel, State),
-                       detail_stats_fun(Ranges, ?CHANNEL_DETAILS, State),
-                       augment_msg_stats_fun(State)]).
+                         consumers_by_channel),
+                       detail_stats_fun(Ranges, ?CHANNEL_DETAILS, Interval),
+                       augment_msg_stats_fun()]).
 
-vhost_stats(Ranges, Objs, State) ->
-    merge_stats(Objs, [simple_stats_fun(Ranges, vhost_stats, State)]).
+vhost_stats(Ranges, Objs, Interval) ->
+    merge_stats(Objs, [simple_stats_fun(Ranges, vhost_stats, Interval)]).
 
-node_stats(Ranges, Objs, State) ->
-    merge_stats(Objs, [basic_stats_fun(node_stats, State),
-                       simple_stats_fun(Ranges, node_stats, State),
+node_stats(Ranges, Objs, Interval) ->
+    merge_stats(Objs, [basic_stats_fun(node_stats),
+                       simple_stats_fun(Ranges, node_stats, Interval),
                        detail_and_basic_stats_fun(
-                         node_node_stats, Ranges, ?NODE_DETAILS, State)]).
+                         node_node_stats, Ranges, ?NODE_DETAILS, Interval)]).
 
 merge_stats(Objs, Funs) ->
-    [lists:foldl(fun (Fun, Props) -> combine(Fun(Props), Props) end, Obj, Funs)
-     || Obj <- Objs].
+    %% Don't pass the props to the Fun in combine, as it contains the results
+    %% from previous funs and:
+    %% * augment_msg_stats_fun() only needs the original object. Otherwise,
+    %%      must fold over a very long list
+    %% * All other funs only require the Type that is in the original Obj
+    [combine_all_funs(Funs, Obj, Obj) || Obj <- Objs].
+
+combine_all_funs([Fun | Funs], Obj, Props) ->
+    combine_all_funs(Funs, Obj, combine(Fun(Obj), Props));
+combine_all_funs([], _Obj, Props) ->
+    Props.
+
+merge_queue_stats(Objs, Funs) ->
+    %% Don't pass the props to the Fun in combine, as it contains the results
+    %% from previous funs and:
+    %% * augment_msg_stats_fun() only needs the original object. Otherwise,
+    %%      must fold over a very long list
+    %% * All other funs only require the Type that is in the original Obj
+    [begin
+         Pid = pget(pid, Obj),
+         {Pid, combine_all_funs(Funs, Obj, rabbit_mgmt_format:strip_queue_pids(Obj))}
+     end || Obj <- Objs].
 
 combine(New, Old) ->
     case pget(state, Old) of
         unknown -> New ++ Old;
-        live    -> New ++ proplists:delete(state, Old);
-        _       -> proplists:delete(state, New) ++ Old
+        live    -> New ++ lists:keydelete(state, 1, Old);
+        _       -> lists:keydelete(state, 1, New) ++ Old
     end.
 
 %% i.e. the non-calculated stats
-basic_stats_fun(Type, #state{tables = Tables}) ->
-    Table = orddict:fetch(Type, Tables),
+basic_stats_fun(Type) ->
     fun (Props) ->
             Id = id_lookup(Type, Props),
-            lookup_element(Table, {Id, stats})
+            lookup_element(Type, {Id, stats})
     end.
 
 %% i.e. coarse stats, and fine stats aggregated up to a single number per thing
-simple_stats_fun(Ranges, Type, State) ->
+simple_stats_fun(Ranges, Type, Interval) ->
+    {Msg, Other} = read_simple_stats(Type),
     fun (Props) ->
             Id = id_lookup(Type, Props),
-            extract_msg_stats(
-              format_samples(Ranges, read_simple_stats(Type, Id, State), State))
+            OtherStats = format_samples(Ranges, {Id, Other}, Interval),
+            case format_samples(Ranges, {Id, Msg}, Interval) of
+                [] ->
+                    OtherStats;
+                MsgStats ->
+                    [{message_stats, MsgStats} | OtherStats]
+            end
     end.
 
 %% i.e. fine stats that are broken out per sub-thing
-detail_stats_fun(Ranges, {IdType, FineSpecs}, State) ->
+detail_stats_fun(Ranges, {IdType, FineSpecs}, Interval) ->
     fun (Props) ->
             Id = id_lookup(IdType, Props),
-            [detail_stats(Ranges, Name, AggregatedStatsType, IdFun(Id), State)
+            [detail_stats(Ranges, Name, AggregatedStatsType, IdFun(Id), Interval)
              || {Name, AggregatedStatsType, IdFun} <- FineSpecs]
     end.
 
@@ -990,167 +484,197 @@ detail_stats_fun(Ranges, {IdType, FineSpecs}, State) ->
 %% only user of that is node-node stats.
 %%
 %% We also assume that FineSpecs is single length here (at [1]).
-detail_and_basic_stats_fun(Type, Ranges, {IdType, FineSpecs},
-                           State = #state{tables = Tables}) ->
-    Table = orddict:fetch(Type, Tables),
-    F = detail_stats_fun(Ranges, {IdType, FineSpecs}, State),
+detail_and_basic_stats_fun(Type, Ranges, {IdType, FineSpecs}, Interval) ->
+    F = detail_stats_fun(Ranges, {IdType, FineSpecs}, Interval),
     fun (Props) ->
             Id = id_lookup(IdType, Props),
-            BasicStatsRaw = ets:match(Table, {{{Id, '$1'}, stats}, '$2', '_'}),
-            BasicStatsDict = dict:from_list([{K, V} || [K,V] <- BasicStatsRaw]),
+            BasicStats = ets:select(Type, [{{{{'$1', '$2'}, '$3'}, '$4', '_'},
+                                               [{'==', '$1', Id},
+                                                {'==', '$3', stats}],
+                                               [{{'$2', '$4'}}]}]),
             [{K, Items}] = F(Props), %% [1]
-            Items2 = [case dict:find(id_lookup(IdType, Item), BasicStatsDict) of
-                          {ok, BasicStats} -> BasicStats ++ Item;
-                          error            -> Item
+            Items2 = [case lists:keyfind(id_lookup(IdType, Item), 1, BasicStats) of
+                          false -> Item;
+                          {_, BS} -> BS ++ Item
                       end || Item <- Items],
             [{K, Items2}]
     end.
 
-read_simple_stats(Type, Id, #state{aggregated_stats = ETS}) ->
-    FromETS = ets:match(ETS, {{{Type, Id}, '$1'}, '$2'}),
-    [{K, V} || [K, V] <- FromETS].
+read_simple_stats(EventType) ->
+    lists:partition(
+      fun({_, Type}) ->
+              lists:member(Type, [fine_stats, deliver_get, queue_msg_rates])
+      end, rabbit_mgmt_stats_tables:aggr_tables(EventType)).
 
-read_detail_stats(Type, Id, #state{aggregated_stats = ETS}) ->
-    %% Id must contain '$1'
-    FromETS = ets:match(ETS, {{{Type, Id}, '$2'}, '$3'}),
-    %% [[G, K, V]] -> [{G, [{K, V}]}] where G is Q/X/Ch, K is from
-    %% ?FINE_STATS and V is a stats tree
-    %% TODO does this need to be optimised?
+read_detail_stats(EventType, Id) ->
+    Tables = rabbit_mgmt_stats_tables:aggr_tables(EventType),
+    Keys =  [{Table, Type, Key} || {Table, Type} <- Tables,
+                                   Key <- rabbit_mgmt_stats:get_keys(Table, Id)],
     lists:foldl(
-      fun ([G, K, V], L) ->
-              case lists:keyfind(G, 1, L) of
-                  false    -> [{G, [{K, V}]} | L];
-                  {G, KVs} -> lists:keyreplace(G, 1, L, {G, [{K, V} | KVs]})
+      fun ({_Table, _Type, Id0} = Entry, L) ->
+              NewId = revert(Id, Id0),
+              case lists:keyfind(NewId, 1, L) of
+                      false    ->
+                      [{NewId, [Entry]} | L];
+                  {NewId, KVs} ->
+                      lists:keyreplace(NewId, 1, L, {NewId, [Entry | KVs]})
               end
-      end, [], FromETS).
-
-extract_msg_stats(Stats) ->
-    FineStats = lists:append([[K, details_key(K)] || K <- ?MSG_RATES]),
-    {MsgStats, Other} =
-        lists:partition(fun({K, _}) -> lists:member(K, FineStats) end, Stats),
-    case MsgStats of
-        [] -> Other;
-        _  -> [{message_stats, MsgStats} | Other]
-    end.
+      end, [], Keys).
+
+revert({'_', _}, {Id, _}) ->
+    Id;
+revert({_, '_'}, {_, Id}) ->
+    Id.
 
-detail_stats(Ranges, Name, AggregatedStatsType, Id, State) ->
+detail_stats(Ranges, Name, AggregatedStatsType, Id, Interval) ->
     {Name,
-     [[{stats, format_samples(Ranges, KVs, State)} | format_detail_id(G, State)]
-      || {G, KVs} <- read_detail_stats(AggregatedStatsType, Id, State)]}.
+     [[{stats, format_samples(Ranges, KVs, Interval)} | format_detail_id(G)]
+      || {G, KVs} <- read_detail_stats(AggregatedStatsType, Id)]}.
 
-format_detail_id(ChPid, State) when is_pid(ChPid) ->
-    augment_msg_stats([{channel, ChPid}], State);
-format_detail_id(#resource{name = Name, virtual_host = Vhost, kind = Kind},
-                 _State) ->
+format_detail_id(ChPid) when is_pid(ChPid) ->
+    augment_msg_stats([{channel, ChPid}]);
+format_detail_id(#resource{name = Name, virtual_host = Vhost, kind = Kind}) ->
     [{Kind, [{name, Name}, {vhost, Vhost}]}];
-format_detail_id(Node, _State) when is_atom(Node) ->
+format_detail_id(Node) when is_atom(Node) ->
     [{name, Node}].
 
-format_samples(Ranges, ManyStats, #state{interval = Interval}) ->
-    lists:append(
-      [case rabbit_mgmt_stats:is_blank(Stats) andalso
-           not lists:member(K, ?ALWAYS_REPORT_STATS) of
-           true  -> [];
-           false -> {Details, Counter} = rabbit_mgmt_stats:format(
-                                           pick_range(K, Ranges),
-                                           Stats, Interval),
-                    [{K,              Counter},
-                     {details_key(K), Details}]
-       end || {K, Stats} <- ManyStats]).
-
-pick_range(K, {RangeL, RangeM, RangeD, RangeN}) ->
-    case {lists:member(K, ?QUEUE_MSG_COUNTS),
-          lists:member(K, ?MSG_RATES),
-          lists:member(K, ?COARSE_CONN_STATS),
-          lists:member(K, ?COARSE_NODE_STATS)
-          orelse lists:member(K, ?COARSE_NODE_NODE_STATS)} of
-        {true, false, false, false} -> RangeL;
-        {false, true, false, false} -> RangeM;
-        {false, false, true, false} -> RangeD;
-        {false, false, false, true} -> RangeN
+format_samples(Ranges, {Id, ManyStats}, Interval) ->
+    lists:append(foldl_stats_format(ManyStats, Id, Ranges, Interval, []));
+format_samples(Ranges, ManyStats, Interval) ->
+    lists:append(foldl_stats_format(ManyStats, Ranges, Interval, [])).
+
+foldl_stats_format([{Table, Record} | T], Id, Ranges, Interval, Acc) ->
+    foldl_stats_format(T, Id, Ranges, Interval,
+                       stats_format(Table, Id, Record, Ranges, Interval, Acc));
+foldl_stats_format([], _Id, _Ranges, _Interval, Acc) ->
+    Acc.
+
+foldl_stats_format([{Table, Record, Id} | T], Ranges, Interval, Acc) ->
+    foldl_stats_format(T, Ranges, Interval,
+                       stats_format(Table, Id, Record, Ranges, Interval, Acc));
+foldl_stats_format([], _Ranges, _Interval, Acc) ->
+    Acc.
+
+stats_format(Table, Id, Record, Ranges, Interval, Acc) ->
+    case rabbit_mgmt_stats:is_blank(Table, Id, Record) of
+        true  ->
+            Acc;
+        false ->
+            [rabbit_mgmt_stats:format(pick_range(Record, Ranges),
+                                      Table, Id, Interval, Record) | Acc]
     end.
 
+pick_range(queue_msg_counts, {RangeL, _RangeM, _RangeD, _RangeN}) ->
+    RangeL;
+pick_range(K, {_RangeL, RangeM, _RangeD, _RangeN}) when K == fine_stats;
+                                                        K == deliver_get;
+                                                        K == queue_msg_rates ->
+    RangeM;
+pick_range(K, {_RangeL, _RangeM, RangeD, _RangeN}) when K == coarse_conn_stats;
+                                                        K == process_stats ->
+    RangeD;
+pick_range(K, {_RangeL, _RangeM, _RangeD, RangeN})
+  when K == coarse_node_stats;
+       K == coarse_node_node_stats ->
+    RangeN.
+
 %% We do this when retrieving the queue record rather than when
 %% storing it since the memory use will drop *after* we find out about
 %% hibernation, so to do it when we receive a queue stats event would
 %% be fiddly and racy. This should be quite cheap though.
 adjust_hibernated_memory_use(Qs) ->
-    Pids = [pget(pid, Q) ||
-               Q <- Qs, pget(idle_since, Q, not_idle) =/= not_idle],
+    Pids = [Pid || {Pid, Q} <- Qs, pget(idle_since, Q, not_idle) =/= not_idle],
     %% We use delegate here not for ordering reasons but because we
     %% want to get the right amount of parallelism and minimise
     %% cross-cluster communication.
     {Mem, _BadNodes} = delegate:invoke(Pids, {erlang, process_info, [memory]}),
     MemDict = dict:from_list([{P, M} || {P, M = {memory, _}} <- Mem]),
-    [case dict:find(pget(pid, Q), MemDict) of
+    [case dict:find(Pid, MemDict) of
          error        -> Q;
          {ok, Memory} -> [Memory|proplists:delete(memory, Q)]
-     end || Q <- Qs].
-
-created_event(Name, Type, Tables) ->
-    Table = orddict:fetch(Type, Tables),
-    case ets:match(Table, {{'$1', create}, '_', Name}) of
-        []     -> not_found;
-        [[Id]] -> lookup_element(Table, {Id, create})
+     end || {Pid, Q} <- Qs].
+
+created_event(Name, Type) ->
+    case ets:select(Type, [{{{'_', '$1'}, '$2', '$3'}, [{'==', 'create', '$1'},
+                                                        {'==', Name, '$3'}],
+                            ['$2']}]) of
+        [] -> not_found;
+        [Elem] -> Elem
     end.
 
-created_events(Type, Tables) ->
-    [Facts || {{_, create}, Facts, _Name}
-                  <- ets:tab2list(orddict:fetch(Type, Tables))].
+created_events(Type) ->
+    ets:select(Type, [{{{'_', '$1'}, '$2', '_'}, [{'==', 'create', '$1'}],
+                       ['$2']}]).
+
+consumers_by_queue_and_vhost(VHost) ->
+    ets:select(consumers_by_queue,
+               [{{{#resource{virtual_host = '$1', _ = '_'}, '_', '_'}, '$2'},
+                 [{'orelse', {'==', 'all', VHost}, {'==', VHost, '$1'}}],
+                 ['$2']}]).
 
-consumer_details_fun(KeyFun, TableName, State = #state{tables = Tables}) ->
-    Table = orddict:fetch(TableName, Tables),
+consumer_details_fun(KeyFun, TableName) ->
     fun ([])    -> [];
         (Props) -> Pattern = {KeyFun(Props), '_', '_'},
                    [{consumer_details,
-                     [augment_msg_stats(augment_consumer(Obj), State)
+                     [augment_msg_stats(augment_consumer(Obj))
                       || Obj <- lists:append(
-                                  ets:match(Table, {Pattern, '$1'}))]}]
+                                  ets:match(TableName, {Pattern, '$1'}))]}]
     end.
 
 augment_consumer(Obj) ->
     [{queue, rabbit_mgmt_format:resource(pget(queue, Obj))} |
-     proplists:delete(queue, Obj)].
+     lists:keydelete(queue, 1, Obj)].
 
 %%----------------------------------------------------------------------------
 %% Internal, query-time summing for overview
 %%----------------------------------------------------------------------------
 
-overview_sum(Type, VHostStats) ->
-    Stats = [pget(Type, VHost, rabbit_mgmt_stats:blank())
-             || VHost <- VHostStats],
-    {Type, rabbit_mgmt_stats:sum(Stats)}.
+overview_sum(Type, VHosts) ->
+    Stats = [{rabbit_mgmt_stats_tables:aggr_table(vhost_stats, Type), VHost}
+             || VHost <- VHosts],
+    {rabbit_mgmt_stats:sum(Stats), Type, all}.
 
 %%----------------------------------------------------------------------------
 %% Internal, query-time augmentation
 %%----------------------------------------------------------------------------
 
-augment_msg_stats(Props, State) ->
+augment_msg_stats(Props) ->
     rabbit_mgmt_format:strip_pids(
-      (augment_msg_stats_fun(State))(Props) ++ Props).
-
-augment_msg_stats_fun(State) ->
-    Funs = [{connection, fun augment_connection_pid/2},
-            {channel,    fun augment_channel_pid/2},
-            {owner_pid,  fun augment_connection_pid/2}],
-    fun (Props) -> augment(Props, Funs, State) end.
-
-augment(Items, Funs, State) ->
-    Augmented = [augment(K, Items, Fun, State) || {K, Fun} <- Funs],
-    [{K, V} || {K, V} <- Augmented, V =/= unknown].
-
-augment(K, Items, Fun, State) ->
-    Key = details_key(K),
-    case pget(K, Items) of
-        none    -> {Key, unknown};
-        unknown -> {Key, unknown};
-        Id      -> {Key, Fun(Id, State)}
+      (augment_msg_stats_fun())(Props) ++ Props).
+
+augment_msg_stats_fun() ->
+    fun(Props) ->
+            augment_details(Props, [])
+    end.
+
+augment_details([{_, none} | T], Acc) ->
+    augment_details(T, Acc);
+augment_details([{_, unknown} | T], Acc) ->
+    augment_details(T, Acc);
+augment_details([{connection, Value} | T], Acc) ->
+    augment_details(T, [{connection_details, augment_connection_pid(Value)} | Acc]);
+augment_details([{channel, Value} | T], Acc) ->
+    augment_details(T, [{channel_details, augment_channel_pid(Value)} | Acc]);
+augment_details([{owner_pid, Value} | T], Acc) ->
+    augment_details(T, [{owner_pid_details, augment_connection_pid(Value)} | Acc]);
+augment_details([_ | T], Acc) ->
+    augment_details(T, Acc);
+augment_details([], Acc) ->
+    Acc.
+
+augment_queue_msg_stats_fun() ->
+    fun(Props) ->
+            case lists:keyfind(owner_pid, 1, Props) of
+                {owner_pid, Value} when is_pid(Value) ->
+                    [{owner_pid_details, augment_connection_pid(Value)}];
+                _ ->
+                    []
+            end
     end.
 
-augment_channel_pid(Pid, #state{tables = Tables}) ->
-    Ch = lookup_element(orddict:fetch(channel_stats, Tables),
-                        {Pid, create}),
-    Conn = lookup_element(orddict:fetch(connection_stats, Tables),
+augment_channel_pid(Pid) ->
+    Ch = lookup_element(channel_stats, {Pid, create}),
+    Conn = lookup_element(connection_stats,
                           {pget(connection, Ch), create}),
     [{name,            pget(name,   Ch)},
      {number,          pget(number, Ch)},
@@ -1159,57 +683,20 @@ augment_channel_pid(Pid, #state{tables = Tables}) ->
      {peer_port,       pget(peer_port,    Conn)},
      {peer_host,       pget(peer_host,    Conn)}].
 
-augment_connection_pid(Pid, #state{tables = Tables}) ->
-    Conn = lookup_element(orddict:fetch(connection_stats, Tables),
-                          {Pid, create}),
+augment_connection_pid(Pid) ->
+    Conn = lookup_element(connection_stats, {Pid, create}),
     [{name,         pget(name,         Conn)},
      {peer_port,    pget(peer_port,    Conn)},
      {peer_host,    pget(peer_host,    Conn)}].
 
-%%----------------------------------------------------------------------------
-%% Internal, event-GCing
-%%----------------------------------------------------------------------------
-
-gc_batch(State = #state{aggregated_stats = ETS}) ->
-    {ok, Policies} = application:get_env(
-                       rabbitmq_management, sample_retention_policies),
-    Rows = erlang:max(?GC_MIN_ROWS,
-                      round(?GC_MIN_RATIO * ets:info(ETS, size))),
-    gc_batch(Rows, Policies, State).
-
-gc_batch(0, _Policies, State) ->
-    State;
-gc_batch(Rows, Policies, State = #state{aggregated_stats = ETS,
-                                        gc_next_key      = Key0}) ->
-    Key = case Key0 of
-              undefined -> ets:first(ETS);
-              _         -> ets:next(ETS, Key0)
-          end,
-    Key1 = case Key of
-               '$end_of_table' -> undefined;
-               _               -> Now = floor(
-                                    time_compat:os_system_time(milli_seconds),
-                                    State),
-                                  Stats = ets:lookup_element(ETS, Key, 2),
-                                  gc(Key, Stats, Policies, Now, ETS),
-                                  Key
-           end,
-    gc_batch(Rows - 1, Policies, State#state{gc_next_key = Key1}).
-
-gc({{Type, Id}, Key}, Stats, Policies, Now, ETS) ->
-    Policy = pget(retention_policy(Type), Policies),
-    case rabbit_mgmt_stats:gc({Policy, Now}, Stats) of
-        Stats  -> ok;
-        Stats2 -> ets:insert(ETS, {{{Type, Id}, Key}, Stats2})
-    end.
-
-retention_policy(node_stats)             -> global;
-retention_policy(node_node_stats)        -> global;
-retention_policy(vhost_stats)            -> global;
-retention_policy(queue_stats)            -> basic;
-retention_policy(exchange_stats)         -> basic;
-retention_policy(connection_stats)       -> basic;
-retention_policy(channel_stats)          -> basic;
-retention_policy(queue_exchange_stats)   -> detailed;
-retention_policy(channel_exchange_stats) -> detailed;
-retention_policy(channel_queue_stats)    -> detailed.
+event_queue() ->
+    {message_queue_len, Q0} =
+        erlang:process_info(whereis(rabbit_mgmt_event_collector),
+                            message_queue_len),
+    {message_queue_len, Q1} =
+        erlang:process_info(whereis(rabbit_mgmt_queue_stats_collector),
+                            message_queue_len),
+    {message_queue_len, Q2} =
+        erlang:process_info(whereis(rabbit_mgmt_channel_stats_collector),
+                            message_queue_len),
+    Q0 + Q1 + Q2.
index 8104f78381210f503ab435293b3a9c7e2d56bd66..53b05ae7f66bd5456857f2bf390f348b2d161bc1 100644 (file)
@@ -41,6 +41,12 @@ dispatcher() ->
      {["cluster-name"],                                            rabbit_mgmt_wm_cluster_name, []},
      {["nodes"],                                                   rabbit_mgmt_wm_nodes, []},
      {["nodes", node],                                             rabbit_mgmt_wm_node, []},
+     {["nodes", node, "memory"],                                   rabbit_mgmt_wm_node_memory, [absolute]},
+     {["nodes", node, "memory", "relative"],                       rabbit_mgmt_wm_node_memory, [relative]},
+     {["nodes", node, "memory", "ets"],                            rabbit_mgmt_wm_node_memory_ets, [absolute]},
+     {["nodes", node, "memory", "ets", "relative"],                rabbit_mgmt_wm_node_memory_ets, [relative]},
+     {["nodes", node, "memory", "ets", filter],                    rabbit_mgmt_wm_node_memory_ets, [absolute]},
+     {["nodes", node, "memory", "ets", filter, "relative"],        rabbit_mgmt_wm_node_memory_ets, [relative]},
      {["extensions"],                                              rabbit_mgmt_wm_extensions, []},
      {["all-configuration"],                                       rabbit_mgmt_wm_definitions, []}, %% This was the old name, let's not break things gratuitously.
      {["definitions"],                                             rabbit_mgmt_wm_definitions, []},
@@ -89,5 +95,7 @@ dispatcher() ->
      {["whoami"],                                                  rabbit_mgmt_wm_whoami, []},
      {["permissions"],                                             rabbit_mgmt_wm_permissions, []},
      {["permissions", vhost, user],                                rabbit_mgmt_wm_permission, []},
-     {["aliveness-test", vhost],                                   rabbit_mgmt_wm_aliveness_test, []}
+     {["aliveness-test", vhost],                                   rabbit_mgmt_wm_aliveness_test, []},
+     {["healthchecks", "node"],                                    rabbit_mgmt_wm_healthchecks, []},
+     {["healthchecks", "node", node],                              rabbit_mgmt_wm_healthchecks, []}
     ].
diff --git a/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_event_collector.erl b/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_event_collector.erl
new file mode 100644 (file)
index 0000000..a798453
--- /dev/null
@@ -0,0 +1,165 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ.
+%%
+%%   The Initial Developer of the Original Code is Pivotal Software, Inc.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_event_collector).
+
+-include("rabbit_mgmt.hrl").
+-include("rabbit_mgmt_metrics.hrl").
+-include("rabbit_mgmt_event_collector.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+         code_change/3, handle_pre_hibernate/1]).
+
+%% For testing
+-export([override_lookups/1, reset_lookups/0]).
+
+-import(rabbit_mgmt_db, [pget/2]).
+
+%% See the comment on rabbit_mgmt_db for the explanation of
+%% events and stats.
+
+%% Although this gen_server could process all types of events through the
+%% handle_cast, rabbit_mgmt_db_handler (in the management agent) forwards
+%% the non-prioritiy events channel_stats and queue_stats to their own gen_servers
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+start_link() ->
+    Ref = make_ref(),
+    case gen_server2:start_link({global, ?MODULE}, ?MODULE, [Ref], []) of
+        {ok, Pid} -> register(?MODULE, Pid), %% [1]
+                     rabbit:force_event_refresh(Ref),
+                     {ok, Pid};
+        Else      -> Else
+    end.
+%% [1] For debugging it's helpful to locally register the name too
+%% since that shows up in places global names don't.
+
+override_lookups(Lookups) ->
+    gen_server2:call({global, ?MODULE}, {override_lookups, Lookups}, infinity).
+reset_lookups() ->
+    gen_server2:call({global, ?MODULE}, reset_lookups, infinity).
+
+%%----------------------------------------------------------------------------
+%% Internal, gen_server2 callbacks
+%%----------------------------------------------------------------------------
+
+init([Ref]) ->
+    %% When Rabbit is overloaded, it's usually especially important
+    %% that the management plugin work.
+    process_flag(priority, high),
+    {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+    {ok, RatesMode} = application:get_env(rabbitmq_management, rates_mode),
+    rabbit_node_monitor:subscribe(self()),
+    rabbit_log:info("Statistics event collector started.~n"),
+    ?TABLES = [ets:new(Key, [public, set, named_table]) || Key <- ?TABLES],
+    %% Index for cleaning up stats of abnormally terminated processes.
+    [ets:new(rabbit_mgmt_stats_tables:key_index(Table),
+             [ordered_set, public, named_table]) || Table <- ?PROC_STATS_TABLES],
+    %% Index for the deleting of fine stats, reduces the number of reductions
+    %% to 1/8 under heavy load.
+    ets:new(old_stats_fine_index, [bag, public, named_table]),
+    ?AGGR_TABLES = [rabbit_mgmt_stats:blank(Name) || Name <- ?AGGR_TABLES],
+    {ok, reset_lookups(
+           #state{interval               = Interval,
+                  event_refresh_ref      = Ref,
+                  rates_mode             = RatesMode}), hibernate,
+     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% Used in rabbit_mgmt_test_db where we need guarantees events have
+%% been handled before querying
+handle_call({event, Event = #event{reference = none}}, _From, State) ->
+    rabbit_mgmt_event_collector_utils:handle_event(Event, State),
+    reply(ok, State);
+
+handle_call({override_lookups, Lookups}, _From, State) ->
+    reply(ok, State#state{lookups = Lookups});
+
+handle_call(reset_lookups, _From, State) ->
+    reply(ok, reset_lookups(State));
+
+handle_call(_Request, _From, State) ->
+    reply(not_understood, State).
+
+%% Only handle events that are real, or pertain to a force-refresh
+%% that we instigated.
+handle_cast({event, Event = #event{reference = none}}, State) ->
+    rabbit_mgmt_event_collector_utils:handle_event(Event, State),
+    noreply(State);
+
+handle_cast({event, Event = #event{reference = Ref}},
+            State = #state{event_refresh_ref = Ref}) ->
+    rabbit_mgmt_event_collector_utils:handle_event(Event, State),
+    noreply(State);
+
+handle_cast(_Request, State) ->
+    noreply(State).
+
+handle_info({node_down, Node}, State) ->
+    Conns = created_events(connection_stats),
+    Chs = created_events(channel_stats),
+    delete_all_from_node(connection_closed, Node, Conns, State),
+    delete_all_from_node(channel_closed, Node, Chs, State),
+    noreply(State);
+
+handle_info(_Info, State) ->
+    noreply(State).
+
+terminate(_Arg, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+reply(Reply, NewState) -> {reply, Reply, NewState, hibernate}.
+noreply(NewState) -> {noreply, NewState, hibernate}.
+
+reset_lookups(State) ->
+    State#state{lookups = [{exchange, fun rabbit_exchange:lookup/1},
+                           {queue,    fun rabbit_amqqueue:lookup/1}]}.
+
+handle_pre_hibernate(State) ->
+    %% rabbit_event can end up holding on to some memory after a busy
+    %% workout, but it's not a gen_server so we can't make it
+    %% hibernate. The best we can do is forcibly GC it here (if
+    %% rabbit_mgmt_db is hibernating the odds are rabbit_event is
+    %% quiescing in some way too).
+    rpc:multicall(
+      rabbit_mnesia:cluster_nodes(running), rabbit_mgmt_db_handler, gc, []),
+    {hibernate, State}.
+
+delete_all_from_node(Type, Node, [Item | Items], State) ->
+    Pid = pget(pid, Item),
+    case node(Pid) of
+        Node ->
+            rabbit_mgmt_event_collector_utils:handle_event(
+              #event{type = Type, props = [{pid, Pid}]}, State);
+        _    -> ok
+    end,
+    delete_all_from_node(Type, Node, Items, State);
+delete_all_from_node(_Type, _Node, [], _State) ->
+    ok.
+
+created_events(Table) ->
+    ets:select(Table, [{{{'_', '$1'}, '$2', '_'}, [{'==', 'create', '$1'}],
+                        ['$2']}]).
diff --git a/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_event_collector_utils.erl b/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_event_collector_utils.erl
new file mode 100644 (file)
index 0000000..d7d2ee1
--- /dev/null
@@ -0,0 +1,551 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ.
+%%
+%%   The Initial Developer of the Original Code is Pivotal Software, Inc.
+%%   Copyright (c) 2010-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_event_collector_utils).
+
+-include("rabbit_mgmt_metrics.hrl").
+-include("rabbit_mgmt_event_collector.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([handle_event/2]).
+
+-import(rabbit_misc, [pget/3]).
+-import(rabbit_mgmt_db, [pget/2, id_name/1, id/2, lookup_element/2]).
+
+%%----------------------------------------------------------------------------
+%% External functions
+%%----------------------------------------------------------------------------
+
+%%------------------------------------------------------------------------------
+%% @doc Handles events from any collector.
+%%
+%% All the gen_server of the collectors have the same internal state record,
+%% which contains the interval, lookups and rate_mode required
+%% by this function. Apart from the lookups that can be modified by the
+%% tests, the rest of the state doesn't change after startup.
+%%
+%% Ideally, the gen_server should pass only the required parameters and not the
+%% full state. However, this simplified the refactor and avoided changing all
+%% internal functions.
+%%
+%% @end
+%%------------------------------------------------------------------------------
+-spec handle_event(#event{}, #state{}) -> ok.
+handle_event(#event{type = queue_stats, props = Stats, timestamp = Timestamp},
+             State) ->
+    handle_stats(queue_stats, Stats, Timestamp,
+                 {fun rabbit_mgmt_format:format_queue_stats/1, false},
+                 ?QUEUE_MSG_COUNTS, ?QUEUE_MSG_RATES ++ ?PROCESS_STATS, State);
+
+handle_event(Event = #event{type = queue_deleted,
+                            props = [{name, Name}],
+                            timestamp = Timestamp},
+             State) ->
+    delete_consumers(Name, consumers_by_queue, consumers_by_channel),
+    %% This is fiddly. Unlike for connections and channels, we need to
+    %% decrease any amalgamated coarse stats for [messages,
+    %% messages_ready, messages_unacknowledged] for this queue - since
+    %% the queue's deletion means we have really got rid of messages!
+    Id = {coarse, {queue_stats, Name}},
+    %% This ceil must correspond to the ceil in append_samples/5
+    TS = ceil(Timestamp, State),
+    OldStats = lookup_element(old_stats, Id),
+    %% Record negative samples to zero out the queue's message counts.
+    record_sample_list(Id, OldStats, TS, State, ?QUEUE_MSG_COUNTS),
+    delete_samples(channel_queue_stats,  {'_', Name}),
+    delete_samples(queue_exchange_stats, {Name, '_'}),
+    delete_samples(queue_stats,          Name),
+    handle_deleted(queue_stats, Event);
+
+handle_event(Event = #event{type = exchange_deleted,
+                            props = [{name, Name}]}, _State) ->
+    delete_samples(channel_exchange_stats,  {'_', Name}),
+    delete_samples(queue_exchange_stats,    {'_', Name}),
+    delete_samples(exchange_stats,          Name),
+    handle_deleted(exchange_stats, Event);
+
+%% NOTE(review): this clause returns the list produced by
+%% delete_samples/2 rather than the atom ok declared in the -spec;
+%% callers appear to ignore the return, but confirm.
+handle_event(#event{type = vhost_deleted,
+                    props = [{name, Name}]}, _State) ->
+    delete_samples(vhost_stats, Name);
+
+handle_event(#event{type = connection_created, props = Stats}, _State) ->
+    handle_created(
+      connection_stats, Stats,
+      {fun rabbit_mgmt_format:format_connection_created/1, true});
+
+handle_event(#event{type = connection_stats, props = Stats,
+                    timestamp = Timestamp},
+             State) ->
+    handle_stats(connection_stats, Stats, Timestamp, {[], false},
+                 ?COARSE_CONN_STATS, ?PROCESS_STATS, State);
+
+handle_event(Event = #event{type  = connection_closed,
+                            props = [{pid, Pid}]}, _State) ->
+    delete_samples(connection_stats, Pid),
+    handle_deleted(connection_stats, Event);
+
+handle_event(#event{type = channel_created, props = Stats}, _State) ->
+    handle_created(channel_stats, Stats, {[], false});
+
+%% Channel stats carry both coarse (process) stats and per-queue /
+%% per-exchange fine stats; the latter are diffed against the previous
+%% readings held in old_stats (indexed by channel pid).
+handle_event(#event{type = channel_stats, props = Stats, timestamp = Timestamp},
+             State) ->
+    handle_stats(channel_stats, Stats, Timestamp,
+                 {fun rabbit_mgmt_format:format_channel_stats/1, true},
+                 [], ?PROCESS_STATS, State),
+    ChPid = id(channel_stats, Stats),
+    AllStats = [old_fine_stats(ChPid, Type, Stats)
+                || Type <- ?FINE_STATS_TYPES],
+    Objs = ets:lookup(old_stats_fine_index, ChPid),
+    ets:delete(old_stats_fine_index, ChPid),
+    [ets:delete(old_stats, Key) || {_, Key} <- Objs],
+    %% This ceil must correspond to the ceil in handle_event
+    %% queue_deleted
+    handle_fine_stats_list(ChPid, ceil(Timestamp, State), State, AllStats);
+
+handle_event(Event = #event{type = channel_closed,
+                            props = [{pid, Pid}]},
+             _State) ->
+    delete_consumers(Pid, consumers_by_channel, consumers_by_queue),
+    delete_samples(channel_queue_stats,    {Pid, '_'}),
+    delete_samples(channel_exchange_stats, {Pid, '_'}),
+    delete_samples(channel_stats,          Pid),
+    handle_deleted(channel_stats, Event),
+    Objs = ets:lookup(old_stats_fine_index, Pid),
+    ets:delete(old_stats_fine_index, Pid),
+    [ets:delete(old_stats, Key) || {_, Key} <- Objs];
+
+handle_event(#event{type = consumer_created, props = Props}, _State) ->
+    Fmt = {fun rabbit_mgmt_format:format_arguments/1, true},
+    handle_consumer(fun(Table, Id, P0) ->
+                            P = rabbit_mgmt_format:format(P0, Fmt),
+                            ets:insert(Table, {Id, P})
+                    end,
+                    Props);
+
+handle_event(#event{type = consumer_deleted, props = Props}, _State) ->
+    handle_consumer(fun(Table, Id, _P) -> ets:delete(Table, Id) end,
+                    Props);
+
+%% TODO: we don't clear up after dead nodes here - this is a very tiny
+%% leak every time a node is permanently removed from the cluster. Do
+%% we care?
+%% NOTE(review): pget(persister_stats, Stats0) uses the 3-arity default
+%% of unknown when the key is absent, which would crash the ++ below —
+%% presumably node_stats events always include persister_stats; confirm.
+handle_event(#event{type = node_stats, props = Stats0, timestamp = Timestamp},
+             State) ->
+    Stats = proplists:delete(persister_stats, Stats0) ++
+        pget(persister_stats, Stats0),
+    handle_stats(node_stats, Stats, Timestamp, {[], false}, ?COARSE_NODE_STATS, State);
+
+handle_event(#event{type = node_node_stats, props = Stats,
+                    timestamp = Timestamp}, State) ->
+    handle_stats(node_node_stats, Stats, Timestamp, {[], false}, ?COARSE_NODE_NODE_STATS,
+                 State);
+
+handle_event(Event = #event{type  = node_node_deleted,
+                            props = [{route, Route}]}, _State) ->
+    delete_samples(node_node_stats, Route),
+    handle_deleted(node_node_stats, Event);
+
+%% Unknown event types are ignored.
+handle_event(_Event, _State) ->
+    ok.
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+%% Convenience wrapper: no "non-aggregated" rate keys.
+handle_stats(TName, Stats, Timestamp, Funs, RatesKeys, State) ->
+    handle_stats(TName, Stats, Timestamp, Funs, RatesKeys, [], State).
+
+%% Record rate samples for the listed keys (diffed against the previous
+%% reading in old_stats), then strip the id and rate/fine-stats keys and
+%% store the formatted remainder as the object's current {Id, stats} row.
+handle_stats(TName, Stats, Timestamp, Funs, RatesKeys, NoAggRatesKeys,
+             State) ->
+    Id = id(TName, Stats),
+    IdSamples = {coarse, {TName, Id}},
+    OldStats = lookup_element(old_stats, IdSamples),
+    append_set_of_samples(
+      Stats, Timestamp, OldStats, IdSamples, RatesKeys, NoAggRatesKeys, State),
+    StripKeys = [id_name(TName)] ++ RatesKeys ++ ?FINE_STATS_TYPES,
+    Stats1 = [{K, V} || {K, V} <- Stats, not lists:member(K, StripKeys),
+                        V =/= unknown],
+    Stats2 = rabbit_mgmt_format:format(Stats1, Funs),
+    ets:insert(TName, {{Id, stats}, Stats2, Timestamp}),
+    ok.
+
+%% Build the key for a fine-stats entry: {Ch, Q, X} for a per-queue,
+%% per-exchange pair, or {Ch, QorX} for a single queue or exchange.
+fine_stats_id(ChPid, {Q, X}) -> {ChPid, Q, X};
+fine_stats_id(ChPid, QorX)   -> {ChPid, QorX}.
+
+%% Round a timestamp up to the collector's sampling interval boundary.
+ceil(TS, #state{interval = Interval}) ->
+    rabbit_mgmt_util:ceil(TS, Interval).
+
+%% Store a "created" event: format the props with Funs and insert them
+%% under {Id, create}; for process-stats tables also maintain the
+%% key index used for pid lookups.
+handle_created(TName, Stats, Funs) ->
+    Formatted = rabbit_mgmt_format:format(Stats, Funs),
+    Id = id(TName, Stats),
+    ets:insert(TName, {{Id, create}, Formatted, pget(name, Stats)}),
+    case lists:member(TName, ?PROC_STATS_TABLES) of
+        true  -> ets:insert(rabbit_mgmt_stats_tables:key_index(TName), {Id});
+        false -> true
+    end.
+
+%% Remove all traces of a deleted object: its create/stats rows (when
+%% TName is one of the known tables), its old_stats diff base, and its
+%% key-index entry for process-stats tables.
+handle_deleted(TName, #event{props = Props}) ->
+    Id = id(TName, Props),
+    case lists:member(TName, ?TABLES) of
+        true  -> ets:delete(TName, {Id, create}),
+                 ets:delete(TName, {Id, stats});
+        false -> ok
+    end,
+    ets:delete(old_stats, {coarse, {TName, Id}}),
+    case lists:member(TName, ?PROC_STATS_TABLES) of
+        true  -> ets:delete(rabbit_mgmt_stats_tables:key_index(TName), Id);
+        false -> true
+    end.
+
+%% Apply Fun (insert or delete) to both consumer indices, keyed by
+%% {Queue, Channel, CTag} and {Channel, Queue, CTag} respectively.
+handle_consumer(Fun, Props) ->
+    P = rabbit_mgmt_format:format(Props, {[], false}),
+    CTag = pget(consumer_tag, P),
+    Q    = pget(queue,        P),
+    Ch   = pget(channel,      P),
+    Fun(consumers_by_queue,  {Q, Ch, CTag}, P),
+    Fun(consumers_by_channel, {Ch, Q, CTag}, P).
+
+%% The consumer_deleted event is emitted by queues themselves -
+%% therefore in the event that a queue dies suddenly we may not get
+%% it. The best way to handle this is to make sure we also clean up
+%% consumers when we hear about any queue going down.
+%%
+%% Deletes every consumer row keyed by PrimId from the primary table,
+%% then removes the mirrored rows from the secondary table.
+delete_consumers(PrimId, PrimTableName, SecTableName) ->
+    SecIdCTags = ets:match(PrimTableName, {{PrimId, '$1', '$2'}, '_'}),
+    ets:match_delete(PrimTableName, {{PrimId, '_', '_'}, '_'}),
+    delete_consumers_entry(PrimId, SecTableName, SecIdCTags).
+
+%% Walk the [SecId, CTag] matches and delete each mirrored row.
+delete_consumers_entry(PrimId, SecTableName, [[SecId, CTag] | SecIdTags]) ->
+    ets:delete(SecTableName, {SecId, PrimId, CTag}),
+    delete_consumers_entry(PrimId, SecTableName, SecIdTags);
+delete_consumers_entry(_PrimId, _SecTableName, []) ->
+    ok.
+
+%% Pair each new fine-stats reading of the given Type with its previous
+%% reading from old_stats; returns the atom ignore when the channel did
+%% not report this stats type.
+old_fine_stats(ChPid, Type, Props) ->
+    case pget(Type, Props) of
+        unknown       -> ignore;
+        AllFineStats0 -> [begin
+                              Id = fine_stats_id(ChPid, Ids),
+                              {{fine, Id}, Stats, lookup_element(old_stats, {fine, Id})}
+                          end || {Ids, Stats} <- AllFineStats0]
+    end.
+
+%% Process each per-type batch of fine stats in turn; always ok.
+handle_fine_stats_list(ChPid, Timestamp, State, [AllStatsElem | AllStats]) ->
+    handle_fine_stats(ChPid, Timestamp, AllStatsElem, State),
+    handle_fine_stats_list(ChPid, Timestamp, State, AllStats);
+handle_fine_stats_list(_ChPid, _Timestamp, _State, []) ->
+    ok.
+
+%% ignore: the channel did not report this stats type (see old_fine_stats).
+handle_fine_stats(_ChPid, _Timestamp, ignore, _State) ->
+    ok;
+%% For each {Id, NewStats, OldStats} triple: add a synthetic deliver_get
+%% total when any deliver/get counter is non-zero, record the diffed
+%% samples, and remember the new reading for the next diff.
+handle_fine_stats(ChPid, Timestamp, [{Id, Stats, OldStats} | AllStats], State) ->
+    Total = lists:sum([V || {K, V} <- Stats, lists:member(K, ?DELIVER_GET)]),
+    Stats1 = case Total of
+                 0 -> Stats;
+                 _ -> [{deliver_get, Total}|Stats]
+             end,
+    append_all_samples(Timestamp, OldStats, Id, true, State, Stats1),
+    ets:insert(old_stats, {Id, Stats1}),
+    ets:insert(old_stats_fine_index, {ChPid, Id}),
+    handle_fine_stats(ChPid, Timestamp, AllStats, State);
+handle_fine_stats(_ChPid, _Timestamp, [], _State) ->
+    ok.
+
+%% Delete every aggregated-stats row matching Id0 ('_' wildcards allowed)
+%% from all aggregation tables derived from Type.
+delete_samples(Type, Id0) ->
+    [rabbit_mgmt_stats:delete_stats(Table, Id0)
+     || {Table, _} <- rabbit_mgmt_stats_tables:aggr_tables(Type)].
+
+%% Append both the aggregated (Keys) and non-aggregated (NoAggKeys)
+%% samples for one object and remember the raw reading for future diffs.
+%% Samples for queues that no longer exist are dropped entirely.
+append_set_of_samples(Stats, TS, OldStats, Id, Keys, NoAggKeys, State) ->
+    %% Refactored to avoid duplicated calls to ignore_coarse_sample, ceil and
+    %% ets:insert(old_stats ...)
+    case ignore_coarse_sample(Id, State) of
+        false ->
+            %% This ceil must correspond to the ceil in handle_event
+            %% queue_deleted
+            NewMS = ceil(TS, State),
+            append_samples_by_keys(
+              Stats, NewMS, OldStats, Id, Keys, true, State),
+            append_samples_by_keys(
+              Stats, NewMS, OldStats, Id, NoAggKeys, false, State),
+            ets:insert(old_stats, {Id, Stats});
+        true ->
+            ok
+    end.
+
+%% Keys may be the atom all (sample every stat present) or an explicit
+%% key list; Agg says whether samples should also feed aggregated stats.
+append_samples_by_keys(Stats, TS, OldStats, Id, Keys, Agg, State) ->
+    case Keys of
+        all ->
+            append_all_samples(TS, OldStats, Id, Agg, State, Stats);
+        _   ->
+            append_some_samples(TS, OldStats, Id, Agg, State, Stats, Keys)
+    end.
+
+%% Sample only the listed keys, skipping zero values unless the key is
+%% in ?ALWAYS_REPORT_STATS.
+append_some_samples(NewMS, OldStats, Id, Agg, State, Stats, [K | Keys]) ->
+    V = pget(K, Stats),
+    case V =/= 0 orelse lists:member(K, ?ALWAYS_REPORT_STATS) of
+        true ->
+            append_sample(K, V, NewMS, OldStats, Id, Agg, State);
+        false ->
+            ok
+    end,
+    append_some_samples(NewMS, OldStats, Id, Agg, State, Stats, Keys);
+append_some_samples(_NewMS, _OldStats, _Id, _Agg, _State, _Stats, []) ->
+    ok.
+
+%% Sample every {K, V} pair present; zero values are only recorded for
+%% keys in ?ALWAYS_REPORT_STATS (first clause matches V == 0 directly).
+append_all_samples(NewMS, OldStats, Id, Agg, State, [{K, 0} | Stats]) ->
+    case lists:member(K, ?ALWAYS_REPORT_STATS) of
+        true ->
+            append_sample(K, 0, NewMS, OldStats, Id, Agg, State);
+        false ->
+            ok
+    end,
+    append_all_samples(NewMS, OldStats, Id, Agg, State, Stats);
+append_all_samples(NewMS, OldStats, Id, Agg, State, [{K, V} | Stats]) ->
+    append_sample(K, V, NewMS, OldStats, Id, Agg, State),
+    append_all_samples(NewMS, OldStats, Id, Agg, State, Stats);
+append_all_samples(_NewMS, _OldStats, _Id, _Agg, _State, []) ->
+    ok.
+
+%% Record the delta between the new numeric value and the previous one
+%% (treating a missing or non-numeric previous value as 0). Non-numeric
+%% new values are silently skipped by the second clause.
+append_sample(Key, Val, NewMS, OldStats, Id, Agg, State) when is_number(Val) ->
+    OldVal = case pget(Key, OldStats, 0) of
+        N when is_number(N) -> N;
+        _                   -> 0
+    end,
+    record_sample(Id, {Key, Val - OldVal, NewMS, State}, Agg, State),
+    ok;
+append_sample(_Key, _Value, _NewMS, _OldStats, _Id, _Agg, _State) ->
+    ok.
+
+%% Drop coarse queue samples for queues that no longer exist (their
+%% deletion event may have overtaken the final stats event); everything
+%% else is kept.
+ignore_coarse_sample({coarse, {queue_stats, Q}}, State) ->
+    not object_exists(Q, State);
+ignore_coarse_sample(_, _) ->
+    false.
+
+
+%% Record a negated sample for each key - used on queue deletion to
+%% cancel out the queue's remaining message counts.
+record_sample_list(Id, OldStats, TS, State, [Key | Keys]) ->
+    record_sample(Id, {Key, -pget(Key, OldStats, 0), TS, State}, true, State),
+    record_sample_list(Id, OldStats, TS, State, Keys);
+record_sample_list(_Id, _OldStats, _TS, _State, []) ->
+    ok.
+
+%% Dispatch a sample to the right per-object (and, when aggregating,
+%% per-vhost / per-exchange) stats tables, dropping references to
+%% queues/exchanges that no longer exist.
+%%
+%% Node stats do not have a vhost of course
+record_sample({coarse, {node_stats, _Node} = Id}, Args, true, _State) ->
+    record_sample0(Id, Args);
+
+record_sample({coarse, {node_node_stats, _Names} = Id}, Args, true, _State) ->
+    record_sample0(Id, Args);
+
+%% Non-aggregated coarse sample: per-object table only.
+record_sample({coarse, Id}, Args, false, _State) ->
+    record_sample0(Id, Args);
+
+%% Aggregated coarse sample: per-object table plus the vhost rollup.
+record_sample({coarse, Id}, Args, true, _State) ->
+    record_sample0(Id, Args),
+    record_sample0({vhost_stats, vhost(Id)}, Args);
+
+%% Deliveries / acks (Q -> Ch)
+record_sample({fine, {Ch, Q = #resource{kind = queue}}}, Args, true, State) ->
+    case object_exists(Q, State) of
+        true  -> record_sample0({channel_queue_stats, {Ch, Q}}, Args),
+                 record_sample0({queue_stats,         Q},       Args);
+        false -> ok
+    end,
+    record_sample0({channel_stats, Ch},       Args),
+    record_sample0({vhost_stats,   vhost(Q)}, Args);
+
+%% Publishes / confirms (Ch -> X)
+record_sample({fine, {Ch, X = #resource{kind = exchange}}}, Args, true,State) ->
+    case object_exists(X, State) of
+        true  -> record_sample0({channel_exchange_stats, {Ch, X}}, Args),
+                 record_sampleX(publish_in,              X,        Args);
+        false -> ok
+    end,
+    record_sample0({channel_stats, Ch},       Args),
+    record_sample0({vhost_stats,   vhost(X)}, Args);
+
+%% Publishes (but not confirms) (Ch -> X -> Q)
+record_sample({fine, {_Ch,
+                      Q = #resource{kind = queue},
+                      X = #resource{kind = exchange}}}, Args, true, State) ->
+    %% TODO This one logically feels like it should be here. It would
+    %% correspond to "publishing channel message rates to queue" -
+    %% which would be nice to handle - except we don't. And just
+    %% uncommenting this means it gets merged in with "consuming
+    %% channel delivery from queue" - which is not very helpful.
+    %% record_sample0({channel_queue_stats, {Ch, Q}}, Args),
+    QExists = object_exists(Q, State),
+    XExists = object_exists(X, State),
+    case QExists of
+        true  -> record_sample0({queue_stats,          Q},       Args);
+        false -> ok
+    end,
+    case QExists andalso XExists of
+        true  -> record_sample0({queue_exchange_stats, {Q,  X}}, Args);
+        false -> ok
+    end,
+    case XExists of
+        true  -> record_sampleX(publish_out,           X,        Args);
+        false -> ok
+    end.
+
+%% We have to check the queue and exchange objects still exist since
+%% their deleted event could be overtaken by a channel stats event
+%% which contains fine stats referencing them. That's also why we
+%% don't need to check the channels exist - their deleted event can't
+%% be overtaken by their own last stats event.
+%%
+%% Also, sometimes the queue_deleted event is not emitted by the queue
+%% (in the nodedown case) - so it can overtake the final queue_stats
+%% event (which is not *guaranteed* to be lost). So we make a similar
+%% check for coarse queue stats.
+%%
+%% We can be sure that mnesia will be up to date by the time we receive
+%% the event (even though we dirty read) since the deletions are
+%% synchronous and we do not emit the deleted event until after the
+%% deletion has occurred.
+%%
+%% Uses the per-kind lookup fun from State (replaceable by tests, see
+%% reset_lookups in the collector).
+object_exists(Name = #resource{kind = Kind}, #state{lookups = Lookups}) ->
+    case (pget(Kind, Lookups))(Name) of
+        {ok, _} -> true;
+        _       -> false
+    end.
+
+%% Resolve the vhost for a sample id: directly from a #resource{}, from
+%% the queue resource inside a queue_stats id, or - for pid-keyed tables
+%% such as channels/connections - from the stored "created" event.
+vhost(#resource{virtual_host = VHost}) ->
+    VHost;
+vhost({queue_stats, #resource{virtual_host = VHost}}) ->
+    VHost;
+vhost({TName, Pid}) ->
+    pget(vhost, lookup_element(TName, {Pid, create})).
+
+%% exchanges have two sets of "publish" stats, so rearrange things a touch
+%% (a plain publish is renamed to publish_in or publish_out depending on
+%% the direction it was observed from; other stat types pass through).
+record_sampleX(RenamePublishTo, X, {publish, Diff, TS, State}) ->
+    record_sample0({exchange_stats, X}, {RenamePublishTo, Diff, TS, State});
+record_sampleX(_RenamePublishTo, X, {Type, Diff, TS, State}) ->
+    record_sample0({exchange_stats, X}, {Type, Diff, TS, State}).
+
+%% Ignore case where ID1 and ID2 are in a tuple, i.e. detailed stats,
+%% when in basic mode
+record_sample0({Type, {_ID1, _ID2}}, {_, _, _, #state{rates_mode = basic}})
+  when Type =/= node_node_stats ->
+    ok;
+%% Write the diff into the aggregation table for this stat's record
+%% type, at the tuple position given by stat_type/1.
+record_sample0({Type, Id0}, {Key0, Diff, TS, #state{}}) ->
+    {Key, Pos} = stat_type(Key0),
+    Id = {Id0, TS},
+    rabbit_mgmt_stats:record(Id, Pos, Diff, Key,
+                             rabbit_mgmt_stats_tables:aggr_table(Type, Key)).
+
+%%------------------------------------------------------------------------------
+%% @hidden
+%% @doc Returns the type of the stat and the position in the tuple
+%%
+%% Uses the record definitions for simplicity, keeping track of the positions in
+%% the tuple.
+%% @end
+%%------------------------------------------------------------------------------
+%% deliver/get counters
+stat_type(deliver) ->
+    {deliver_get, #deliver_get.deliver};
+stat_type(deliver_no_ack) ->
+    {deliver_get, #deliver_get.deliver_no_ack};
+stat_type(get) ->
+    {deliver_get, #deliver_get.get};
+stat_type(get_no_ack) ->
+    {deliver_get, #deliver_get.get_no_ack};
+%% fine (per channel-queue/exchange) stats
+stat_type(publish) ->
+    {fine_stats, #fine_stats.publish};
+stat_type(publish_in) ->
+    {fine_stats, #fine_stats.publish_in};
+stat_type(publish_out) ->
+    {fine_stats, #fine_stats.publish_out};
+stat_type(ack) ->
+    {fine_stats, #fine_stats.ack};
+stat_type(deliver_get) ->
+    {fine_stats, #fine_stats.deliver_get};
+stat_type(confirm) ->
+    {fine_stats, #fine_stats.confirm};
+stat_type(return_unroutable) ->
+    {fine_stats, #fine_stats.return_unroutable};
+stat_type(redeliver) ->
+    {fine_stats, #fine_stats.redeliver};
+%% per-queue message rates and counts
+stat_type(disk_reads) ->
+    {queue_msg_rates, #queue_msg_rates.disk_reads};
+stat_type(disk_writes) ->
+    {queue_msg_rates, #queue_msg_rates.disk_writes};
+stat_type(messages) ->
+    {queue_msg_counts, #queue_msg_counts.messages};
+stat_type(messages_ready) ->
+    {queue_msg_counts, #queue_msg_counts.messages_ready};
+stat_type(messages_unacknowledged) ->
+    {queue_msg_counts, #queue_msg_counts.messages_unacknowledged};
+%% coarse per-node stats
+stat_type(mem_used) ->
+    {coarse_node_stats, #coarse_node_stats.mem_used};
+stat_type(fd_used) ->
+    {coarse_node_stats, #coarse_node_stats.fd_used};
+stat_type(sockets_used) ->
+    {coarse_node_stats, #coarse_node_stats.sockets_used};
+stat_type(proc_used) ->
+    {coarse_node_stats, #coarse_node_stats.proc_used};
+stat_type(disk_free) ->
+    {coarse_node_stats, #coarse_node_stats.disk_free};
+stat_type(io_read_count) ->
+    {coarse_node_stats, #coarse_node_stats.io_read_count};
+stat_type(io_read_bytes) ->
+    {coarse_node_stats, #coarse_node_stats.io_read_bytes};
+stat_type(io_read_time) ->
+    {coarse_node_stats, #coarse_node_stats.io_read_time};
+stat_type(io_write_count) ->
+    {coarse_node_stats, #coarse_node_stats.io_write_count};
+stat_type(io_write_bytes) ->
+    {coarse_node_stats, #coarse_node_stats.io_write_bytes};
+stat_type(io_write_time) ->
+    {coarse_node_stats, #coarse_node_stats.io_write_time};
+stat_type(io_sync_count) ->
+    {coarse_node_stats, #coarse_node_stats.io_sync_count};
+stat_type(io_sync_time) ->
+    {coarse_node_stats, #coarse_node_stats.io_sync_time};
+stat_type(io_seek_count) ->
+    {coarse_node_stats, #coarse_node_stats.io_seek_count};
+stat_type(io_seek_time) ->
+    {coarse_node_stats, #coarse_node_stats.io_seek_time};
+stat_type(io_reopen_count) ->
+    {coarse_node_stats, #coarse_node_stats.io_reopen_count};
+stat_type(mnesia_ram_tx_count) ->
+    {coarse_node_stats, #coarse_node_stats.mnesia_ram_tx_count};
+stat_type(mnesia_disk_tx_count) ->
+    {coarse_node_stats, #coarse_node_stats.mnesia_disk_tx_count};
+stat_type(msg_store_read_count) ->
+    {coarse_node_stats, #coarse_node_stats.msg_store_read_count};
+stat_type(msg_store_write_count) ->
+    {coarse_node_stats, #coarse_node_stats.msg_store_write_count};
+stat_type(queue_index_journal_write_count) ->
+    {coarse_node_stats, #coarse_node_stats.queue_index_journal_write_count};
+stat_type(queue_index_write_count) ->
+    {coarse_node_stats, #coarse_node_stats.queue_index_write_count};
+stat_type(queue_index_read_count) ->
+    {coarse_node_stats, #coarse_node_stats.queue_index_read_count};
+stat_type(gc_num) ->
+    {coarse_node_stats, #coarse_node_stats.gc_num};
+stat_type(gc_bytes_reclaimed) ->
+    {coarse_node_stats, #coarse_node_stats.gc_bytes_reclaimed};
+stat_type(context_switches) ->
+    {coarse_node_stats, #coarse_node_stats.context_switches};
+%% node-to-node traffic
+stat_type(send_bytes) ->
+    {coarse_node_node_stats, #coarse_node_node_stats.send_bytes};
+stat_type(recv_bytes) ->
+    {coarse_node_node_stats, #coarse_node_node_stats.recv_bytes};
+%% per-connection octet counters
+stat_type(recv_oct) ->
+    {coarse_conn_stats, #coarse_conn_stats.recv_oct};
+stat_type(send_oct) ->
+    {coarse_conn_stats, #coarse_conn_stats.send_oct};
+%% generic process stats
+stat_type(reductions) ->
+    {process_stats, #process_stats.reductions};
+stat_type(io_file_handle_open_attempt_count) ->
+    {coarse_node_stats, #coarse_node_stats.io_file_handle_open_attempt_count};
+stat_type(io_file_handle_open_attempt_time) ->
+    {coarse_node_stats, #coarse_node_stats.io_file_handle_open_attempt_time}.
index 9364b075d9b496cc2a1578d35fca226460c6ef0d..d734637785eb55feed99ec4e955c8381a8a532db 100644 (file)
 
 -module(rabbit_mgmt_format).
 
--export([format/2, print/2, remove/1, ip/1, ipb/1, amqp_table/1, tuple/1]).
+-export([format/2, ip/1, ipb/1, amqp_table/1, tuple/1]).
 -export([parameter/1, now_to_str/1, now_to_str_ms/1, strip_pids/1]).
--export([node_from_pid/1, protocol/1, resource/1, queue/1, queue_state/1]).
+-export([protocol/1, resource/1, queue/1, queue_state/1]).
 -export([exchange/1, user/1, internal_user/1, binding/1, url/2]).
 -export([pack_binding_props/2, tokenise/1]).
 -export([to_amqp_table/1, listener/1, properties/1, basic_properties/1]).
 -export([record/2, to_basic_properties/1]).
 -export([addr/1, port/1]).
 -export([format_nulls/1]).
+-export([print/2, print/1]).
+
+-export([format_queue_stats/1, format_channel_stats/1,
+         format_arguments/1, format_connection_created/1,
+         format_accept_content/1, format_args/1]).
+
+-export([strip_queue_pids/1]).
 
 -import(rabbit_misc, [pget/2, pset/3]).
 
 -include_lib("rabbit_common/include/rabbit.hrl").
 -include_lib("rabbit_common/include/rabbit_framing.hrl").
 
--define(PIDS_TO_STRIP, [connection, owner_pid, channel,
-                        exclusive_consumer_pid]).
-
 %%--------------------------------------------------------------------
 
-format(Stats, Fs) ->
-    lists:concat([format_item(Stat, Fs) || {_Name, Value} = Stat <- Stats,
-                                           Value =/= unknown]).
-
-format_item(Stat, []) ->
-    [Stat];
-format_item({Name, Value}, [{Fun, Names} | Fs]) ->
-    case lists:member(Name, Names) of
-        true  -> case Fun(Value) of
-                     List when is_list(List) -> List;
-                     Formatted               -> [{Name, Formatted}]
-                 end;
-        false -> format_item({Name, Value}, Fs)
-    end.
+%% Format a stats proplist with a single formatter fun, dropping any
+%% entry whose value is unknown. The second argument is {Fun, OneToOne}:
+%% {[], _} means no formatting; OneToOne = true means Fun maps one stat
+%% to one stat; false means Fun returns a list per stat, so the results
+%% are concatenated (allowing a stat to expand or disappear).
+format(Stats, {[], _}) ->
+    [Stat || {_Name, Value} = Stat <- Stats, Value =/= unknown];
+format(Stats, {Fs, true}) ->
+    [Fs(Stat) || {_Name, Value} = Stat <- Stats, Value =/= unknown];
+format(Stats, {Fs, false}) ->
+    lists:concat([Fs(Stat) || {_Name, Value} = Stat <- Stats,
+                              Value =/= unknown]).
+
+%% Per-stat formatter for queue stats (used with format/2 in list mode):
+%% strips raw pids, turns slave pid lists into node lists, formats the
+%% backing queue status, idle_since timestamp and queue state.
+format_queue_stats({exclusive_consumer_pid, _}) ->
+    [];
+format_queue_stats({slave_pids, ''}) ->
+    [];
+format_queue_stats({slave_pids, Pids}) ->
+    [{slave_nodes, [node(Pid) || Pid <- Pids]}];
+format_queue_stats({synchronised_slave_pids, ''}) ->
+    [];
+format_queue_stats({synchronised_slave_pids, Pids}) ->
+    [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]}];
+format_queue_stats({backing_queue_status, Value}) ->
+    [{backing_queue_status, properties(Value)}];
+format_queue_stats({idle_since, Value}) ->
+    [{idle_since, now_to_str(Value)}];
+format_queue_stats({state, Value}) ->
+    queue_state(Value);
+format_queue_stats(Stat) ->
+    [Stat].
+
+%% Per-stat formatter for channel stats (one-to-one mode): only
+%% idle_since needs converting to a string timestamp.
+format_channel_stats({idle_since, Value}) ->
+    {idle_since, now_to_str(Value)};
+format_channel_stats(Stat) ->
+    Stat.
+
+%% One-to-one formatter: render AMQP argument tables for display.
+format_arguments({arguments, Value}) ->
+    {arguments, amqp_table(Value)};
+format_arguments(Stat) ->
+    Stat.
+
+%% One-to-one formatter: parse arguments received from the HTTP API.
+format_args({arguments, Value}) ->
+    {arguments, rabbit_mgmt_util:args(Value)};
+format_args(Stat) ->
+    Stat.
+
+%% One-to-one formatter for connection_created events: render addresses,
+%% ports, protocol and the client-properties AMQP table.
+format_connection_created({host, Value}) ->
+    {host, addr(Value)};
+format_connection_created({peer_host, Value}) ->
+    {peer_host, addr(Value)};
+format_connection_created({port, Value}) ->
+    {port, port(Value)};
+format_connection_created({peer_port, Value}) ->
+    {peer_port, port(Value)};
+format_connection_created({protocol, Value}) ->
+    {protocol, protocol(Value)};
+format_connection_created({client_properties, Value}) ->
+    {client_properties, amqp_table(Value)};
+format_connection_created(Stat) ->
+    Stat.
+
+%% List-mode formatter shared by exchange/1 and queue/1: expands policy
+%% and resource names, renders argument tables.
+format_exchange_and_queue({policy, Value}) ->
+    policy(Value);
+format_exchange_and_queue({arguments, Value}) ->
+    [{arguments, amqp_table(Value)}];
+format_exchange_and_queue({name, Value}) ->
+    resource(Value);
+format_exchange_and_queue(Stat) ->
+    [Stat].
+
+%% List-mode formatter for bindings: expands the source resource and
+%% renders the arguments table.
+format_binding({source, Value}) ->
+    resource(source, Value);
+format_binding({arguments, Value}) ->
+    [{arguments, amqp_table(Value)}];
+format_binding(Stat) ->
+    [Stat].
+
+%% One-to-one formatter for AMQP basic properties: render the headers table.
+format_basic_properties({headers, Value}) ->
+    {headers, amqp_table(Value)};
+format_basic_properties(Stat) ->
+    Stat.
+
+%% One-to-one formatter for declare-style HTTP payloads: coerce the
+%% boolean-ish fields to real booleans.
+format_accept_content({durable, Value}) ->
+    {durable, rabbit_mgmt_util:parse_bool(Value)};
+format_accept_content({auto_delete, Value}) ->
+    {auto_delete, rabbit_mgmt_util:parse_bool(Value)};
+format_accept_content({internal, Value}) ->
+    {internal, rabbit_mgmt_util:parse_bool(Value)};
+format_accept_content(Stat) ->
+    Stat.
 
 print(Fmt, Val) when is_list(Val) ->
     list_to_binary(lists:flatten(io_lib:format(Fmt, Val)));
 print(Fmt, Val) ->
     print(Fmt, [Val]).
 
-%% TODO - can we remove all these "unknown" cases? Coverage never hits them.
-
-remove(_) -> [].
+%% Flatten an iolist to a binary; pass non-list values through unchanged.
+print(Val) when is_list(Val) ->
+    list_to_binary(lists:flatten(Val));
+print(Val) ->
+    Val.
 
-node_from_pid(Pid) when is_pid(Pid) -> [{node, node(Pid)}];
-node_from_pid('')                   -> [];
-node_from_pid(unknown)              -> [];
-node_from_pid(none)                 -> [].
-
-nodes_from_pids(Name) ->
-    fun('')   -> [];
-       (Pids) -> [{Name, [node(Pid) || Pid <- Pids]}]
-    end.
+%% TODO - can we remove all these "unknown" cases? Coverage never hits them.
 
 ip(unknown) -> unknown;
 ip(IP)      -> list_to_binary(rabbit_misc:ntoa(IP)).
@@ -226,9 +295,7 @@ url(Fmt, Vals) ->
     print(Fmt, [mochiweb_util:quote_plus(V) || V <- Vals]).
 
 exchange(X) ->
-    format(X, [{fun resource/1,   [name]},
-               {fun amqp_table/1, [arguments]},
-               {fun policy/1,     [policy]}]).
+    format(X, {fun format_exchange_and_queue/1, false}).
 
 %% We get queues using rabbit_amqqueue:list/1 rather than :info_all/1 since
 %% the latter wakes up each queue. Therefore we have a record rather than a
@@ -249,9 +316,7 @@ queue(#amqqueue{name            = Name,
        {arguments,   Arguments},
        {pid,         Pid},
        {state,       State}],
-      [{fun resource/1,   [name]},
-       {fun amqp_table/1, [arguments]},
-       {fun policy/1,     [policy]}]).
+      {fun format_exchange_and_queue/1, false}).
 
 queue_state({syncing, Msgs}) -> [{state,         syncing},
                                  {sync_messages, Msgs}];
@@ -271,12 +336,11 @@ binding(#binding{source      = S,
        {routing_key,      Key},
        {arguments,        Args},
        {properties_key, pack_binding_props(Key, Args)}],
-      [{fun (Res) -> resource(source, Res) end, [source]},
-       {fun amqp_table/1,                       [arguments]}]).
+      {fun format_binding/1, false}).
 
 basic_properties(Props = #'P_basic'{}) ->
     Res = record(Props, record_info(fields, 'P_basic')),
-    format(Res, [{fun amqp_table/1, [headers]}]).
+    format(Res, {fun format_basic_properties/1, true}).
 
 record(Record, Fields) ->
     {Res, _Ix} = lists:foldl(fun (K, {L, Ix}) ->
@@ -315,24 +379,59 @@ to_basic_properties(Props) ->
 a2b(A) ->
     list_to_binary(atom_to_list(A)).
 
+strip_queue_pids(Item) ->
+    strip_queue_pids(Item, []).
+
+strip_queue_pids([{_, unknown} | T], Acc) ->
+    strip_queue_pids(T, Acc);
+strip_queue_pids([{pid, Pid} | T], Acc) when is_pid(Pid) ->
+    strip_queue_pids(T, [{node, node(Pid)} | Acc]);
+strip_queue_pids([{pid, _} | T], Acc) ->
+    strip_queue_pids(T, Acc);
+strip_queue_pids([{owner_pid, _} | T], Acc) ->
+    strip_queue_pids(T, Acc);
+strip_queue_pids([Any | T], Acc) ->
+    strip_queue_pids(T, [Any | Acc]);
+strip_queue_pids([], Acc) ->
+    Acc.
+
 %% Items can be connections, channels, consumers or queues, hence remove takes
 %% various items.
 strip_pids(Item = [T | _]) when is_tuple(T) ->
-    format(Item,
-           [{fun node_from_pid/1, [pid]},
-            {fun remove/1,        ?PIDS_TO_STRIP},
-            {nodes_from_pids(slave_nodes), [slave_pids]},
-            {nodes_from_pids(synchronised_slave_nodes),
-             [synchronised_slave_pids]}
-           ]);
+    strip_pids(Item, []);
 
 strip_pids(Items) -> [strip_pids(I) || I <- Items].
 
+strip_pids([{_, unknown} | T], Acc) ->
+    strip_pids(T, Acc);
+strip_pids([{pid, Pid} | T], Acc) when is_pid(Pid) ->
+    strip_pids(T, [{node, node(Pid)} | Acc]);
+strip_pids([{pid, _} | T], Acc) ->
+    strip_pids(T, Acc);
+strip_pids([{connection, _} | T], Acc) ->
+    strip_pids(T, Acc);
+strip_pids([{owner_pid, _} | T], Acc) ->
+    strip_pids(T, Acc);
+strip_pids([{channel, _} | T], Acc) ->
+    strip_pids(T, Acc);
+strip_pids([{exclusive_consumer_pid, _} | T], Acc) ->
+    strip_pids(T, Acc);
+strip_pids([{slave_pids, ''} | T], Acc) ->
+    strip_pids(T, Acc);
+strip_pids([{slave_pids, Pids} | T], Acc) ->
+    strip_pids(T, [{slave_nodes, [node(Pid) || Pid <- Pids]} | Acc]);
+strip_pids([{synchronised_slave_pids, ''} | T], Acc) ->
+    strip_pids(T, Acc);
+strip_pids([{synchronised_slave_pids, Pids} | T], Acc) ->
+    strip_pids(T, [{synchronised_slave_nodes, [node(Pid) || Pid <- Pids]} | Acc]);
+strip_pids([Any | T], Acc) ->
+    strip_pids(T, [Any | Acc]);
+strip_pids([], Acc) ->
+    Acc.
+
 %% Format for JSON replies. Transforms '' into null
 format_nulls(Items) when is_list(Items) ->
-    lists:foldr(fun (Pair, Acc) ->
-                       [format_null_item(Pair) | Acc]
-               end, [], Items);
+    [format_null_item(Pair) || Pair <- Items];
 format_nulls(Item) ->
     format_null_item(Item).
 
diff --git a/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_queue_stats_collector.erl b/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_queue_stats_collector.erl
new file mode 100644 (file)
index 0000000..1d4602e
--- /dev/null
@@ -0,0 +1,120 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ.
+%%
+%%   The Initial Developer of the Original Code is Pivotal Software, Inc.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_queue_stats_collector).
+
+-include("rabbit_mgmt.hrl").
+-include("rabbit_mgmt_metrics.hrl").
+-include("rabbit_mgmt_event_collector.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+         code_change/3, handle_pre_hibernate/1]).
+
+-export([prioritise_cast/3]).
+
+-import(rabbit_misc, [pget/3]).
+-import(rabbit_mgmt_db, [pget/2, id_name/1, id/2, lookup_element/2]).
+
+prioritise_cast({event, #event{type = queue_stats}}, Len,
+                #state{max_backlog = MaxBacklog} = _State)
+  when Len > MaxBacklog ->
+    drop;
+prioritise_cast(_Msg, _Len, _State) ->
+    0.
+
+%% See the comment on rabbit_mgmt_db for the explanation of
+%% events and stats.
+
+%% Although this gen_server could process all types of events through the
+%% handle_cast, rabbit_mgmt_db_handler (in the management agent) forwards
+%% only the non-priority events (channel_stats)
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+start_link() ->
+    case gen_server2:start_link({global, ?MODULE}, ?MODULE, [], []) of
+        {ok, Pid} -> register(?MODULE, Pid), %% [1]
+                     {ok, Pid};
+        Else      -> Else
+    end.
+%% [1] For debugging it's helpful to locally register the name too
+%% since that shows up in places global names don't.
+
+%%----------------------------------------------------------------------------
+%% Internal, gen_server2 callbacks
+%%----------------------------------------------------------------------------
+
+init([]) ->
+    {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+    {ok, RatesMode} = application:get_env(rabbitmq_management, rates_mode),
+    {ok, MaxBacklog} = application:get_env(rabbitmq_management,
+                                           stats_event_max_backlog),
+    process_flag(priority, high),
+    rabbit_log:info("Statistics queue stats collector started.~n"),
+    {ok, reset_lookups(
+           #state{interval               = Interval,
+                  rates_mode             = RatesMode,
+                  max_backlog            = MaxBacklog}), hibernate,
+     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% Used in rabbit_mgmt_test_db where we need guarantees events have
+%% been handled before querying
+handle_call({event, Event = #event{reference = none}}, _From, State) ->
+    rabbit_mgmt_event_collector_utils:handle_event(Event, State),
+    reply(ok, State);
+
+handle_call(_Request, _From, State) ->
+    reply(not_understood, State).
+
+%% Only handle events that are real.
+handle_cast({event, Event = #event{reference = none}}, State) ->
+    rabbit_mgmt_event_collector_utils:handle_event(Event, State),
+    noreply(State);
+
+handle_cast(_Request, State) ->
+    noreply(State).
+
+handle_info(_Info, State) ->
+    noreply(State).
+
+terminate(_Arg, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+reply(Reply, NewState) -> {reply, Reply, NewState, hibernate}.
+noreply(NewState) -> {noreply, NewState, hibernate}.
+
+reset_lookups(State) ->
+    State#state{lookups = [{exchange, fun rabbit_exchange:lookup/1},
+                           {queue,    fun rabbit_amqqueue:lookup/1}]}.
+
+handle_pre_hibernate(State) ->
+    %% rabbit_event can end up holding on to some memory after a busy
+    %% workout, but it's not a gen_server so we can't make it
+    %% hibernate. The best we can do is forcibly GC it here (if
+    %% rabbit_mgmt_db is hibernating the odds are rabbit_event is
+    %% quiescing in some way too).
+    rpc:multicall(
+      rabbit_mnesia:cluster_nodes(running), rabbit_mgmt_db_handler, gc, []),
+    {hibernate, State}.
index f748722b5d0558f020e08cb23750e07d152f060b..af8f3108f7070a8fc983d72b8c18197da060c960 100644 (file)
 -module(rabbit_mgmt_stats).
 
 -include("rabbit_mgmt.hrl").
+-include("rabbit_mgmt_metrics.hrl").
 
--export([blank/0, is_blank/1, record/3, format/3, sum/1, gc/2]).
+-export([blank/1, is_blank/3, record/5, format/5, sum/1, gc/3,
+         free/1, delete_stats/2, get_keys/2]).
 
 -import(rabbit_misc, [pget/2]).
 
+-define(ALWAYS_REPORT, [queue_msg_counts, coarse_node_stats]).
+-define(MICRO_TO_MILLI, 1000).
+
+%% Data is stored in ETS tables:
+%% * one set of ETS tables per event (queue_stats, queue_exchange_stats...)
+%% * each set contains one table per group of events (queue_msg_rates,
+%%   deliver_get, fine_stats...) such as aggr_queue_stats_deliver_get
+%%   (see ?AGGR_TABLES in rabbit_mgmt_metrics.hrl)
+%% * data is then stored as a tuple (not a record) to take advantage of the
+%%   atomic call ets:update_counter/3. The equivalent records are noted in
+%%   rabbit_mgmt_metrics.hrl to get the position and as reference for developers
+%% * Records are of the shape:
+%%    {{Id, base}, Field1, Field2, ....} 
+%%    {{Id, total}, Field1, Field2, ....} 
+%%    {{Id, Timestamp}, Field1, Field2, ....} 
+%%    where Id can be a simple key or a tuple {Id0, Id1} 
+%%
+%% This module is not generic any longer, any new event or field needs to be
+%% manually added, but it increases the performance and allows concurrent
+%% GC, event collection and querying
+%%
+
+%%----------------------------------------------------------------------------
+%% External functions
 %%----------------------------------------------------------------------------
 
-blank() -> #stats{diffs = gb_trees:empty(), base = 0}.
+blank(Name) ->
+    ets:new(rabbit_mgmt_stats_tables:index(Name),
+            [bag, public, named_table]),
+    ets:new(rabbit_mgmt_stats_tables:key_index(Name),
+            [ordered_set, public, named_table]),
+    ets:new(Name, [set, public, named_table]).
 
-is_blank(#stats{diffs = Diffs, base = 0}) -> gb_trees:is_empty(Diffs);
-is_blank(#stats{}) ->                        false.
+is_blank({Table, _, _}, Id, Record) ->
+    is_blank(Table, Id, Record);
+is_blank(Table, Id, Record) ->
+    case ets:lookup(Table, {Id, total}) of
+        [] ->
+            true;
+        [Total] ->
+            case lists:member(Record, ?ALWAYS_REPORT) of
+                true -> false;
+                false -> is_blank(Total)
+            end
+    end.
+
+%%----------------------------------------------------------------------------
+free({Table, IndexTable, KeyIndexTable}) ->
+    ets:delete(Table),
+    ets:delete(IndexTable),
+    ets:delete(KeyIndexTable).
+
+delete_stats(Table, {'_', _} = Id) ->
+    delete_complex_stats(Table, Id);
+delete_stats(Table, {_, '_'} = Id) ->
+    delete_complex_stats(Table, Id);
+delete_stats(Table, Id) ->
+    Keys = full_indexes(Table, Id),
+    ets:delete(rabbit_mgmt_stats_tables:index(Table), Id),
+    ets:delete(rabbit_mgmt_stats_tables:key_index(Table), Id),
+    [ets:delete(Table, Key) || Key <- Keys].
+
+delete_complex_stats(Table, Id) ->
+    Ids = ets:select(rabbit_mgmt_stats_tables:key_index(Table),
+                     match_spec_key_index(Id)),
+    delete_complex_stats_loop(Table, Ids).
+
+delete_complex_stats_loop(_Table, []) ->
+    ok;
+delete_complex_stats_loop(Table, [{Id} | Ids]) ->
+    delete_stats(Table, Id),
+    delete_complex_stats_loop(Table, Ids).
+
+%%----------------------------------------------------------------------------
+get_keys(Table, Id0) ->
+    ets:select(rabbit_mgmt_stats_tables:key_index(Table), match_spec_keys(Id0)).
 
 %%----------------------------------------------------------------------------
 %% Event-time
 %%----------------------------------------------------------------------------
 
-record(TS, Diff, Stats = #stats{diffs = Diffs}) ->
-    Diffs2 = case gb_trees:lookup(TS, Diffs) of
-                 {value, Total} -> gb_trees:update(TS, Diff + Total, Diffs);
-                 none           -> gb_trees:insert(TS, Diff, Diffs)
-             end,
-    Stats#stats{diffs = Diffs2}.
+record({Id, _TS} = Key, Pos, Diff, Record, Table) ->
+    ets_update(Table, Key, Record, Pos, Diff),
+    ets_update(Table, {Id, total}, Record, Pos, Diff).
 
 %%----------------------------------------------------------------------------
 %% Query-time
 %%----------------------------------------------------------------------------
 
-format(no_range, #stats{diffs = Diffs, base = Base}, Interval) ->
+format(no_range, Table, Id, Interval, Type) ->
     Now = time_compat:os_system_time(milli_seconds),
+    Counts = get_value(Table, Id, total, Type),
     RangePoint = ((Now div Interval) * Interval) - Interval,
-    Count = sum_entire_tree(gb_trees:iterator(Diffs), Base),
-    {[{rate, format_rate(
-               Diffs, RangePoint, Interval, Interval)}], Count};
+    {Record, Factor} = format_rate_with(
+                         Table, Id, RangePoint, Interval, Interval, Type),
+    format_rate(Type, Record, Counts, Factor);
 
-format(Range, #stats{diffs = Diffs, base = Base}, Interval) ->
+format(Range, Table, Id, Interval, Type) ->
+    Base = get_value(Table, Id, base, Type),
     RangePoint = Range#range.last - Interval,
-    {Samples, Count} = extract_samples(
-                         Range, Base, gb_trees:iterator(Diffs), []),
-    Part1 = [{rate,    format_rate(
-                         Diffs, RangePoint, Range#range.incr, Interval)},
-             {samples, Samples}],
-    Length = length(Samples),
-    Part2 = case Length > 1 of
-                true  -> [{sample, S2}, {timestamp, T2}] = hd(Samples),
-                         [{sample, S1}, {timestamp, T1}] = lists:last(Samples),
-                         Total = lists:sum([pget(sample, I) || I <- Samples]),
-                         [{avg_rate, (S2 - S1) * 1000 / (T2 - T1)},
-                          {avg,      Total / Length}];
-                false -> []
-            end,
-    {Part1 ++ Part2, Count}.
-
-format_rate(Diffs, RangePoint, Incr, Interval) ->
-    case nth_largest(Diffs, 2) of
-        false   -> 0.0;
-        {TS, S} -> case TS - RangePoint of %% [0]
-                       D when D =< Incr andalso D >= 0 -> S * 1000 / Interval;
-                       _                               -> 0.0
-                   end
+    {Samples, Counts} = extract_samples(Range, Base, Table, Id, Type),
+    {Record, Factor} = format_rate_with(
+                         Table, Id, RangePoint, Range#range.incr, Interval, Type),
+    format_rate(Type, Record, Counts, Samples, Factor).
+
+sum([]) -> blank();
+
+sum([{T1, Id} | StatsN]) ->
+    {Table, IndexTable, KeyIndexTable} = T = blank(),
+    AllIds = full_indexes(T1, Id),
+    lists:foreach(fun(Index) ->
+                          case ets:lookup(T1, Index) of
+                              [V] ->
+                                  {_, TS} = element(1, V),
+                                  ets:insert(Table, setelement(1, V, {all, TS})),
+                                  insert_index(IndexTable, KeyIndexTable, {all, TS});
+                              [] -> %% base
+                                  ok
+                          end
+                  end, AllIds),
+    sum(StatsN, T).
+
+sum(StatsN, T) ->
+    lists:foreach(
+      fun ({T1, Id}) ->
+              AllIds = full_indexes(T1, Id),
+              lists:foreach(fun(Index) ->
+                                    case ets:lookup(T1, Index) of
+                                        [V] ->
+                                            {_, TS} = element(1, V),
+                                            ets_update(T, {all, TS}, V);
+                                        [] -> %% base
+                                            ok
+                                    end
+                            end, AllIds)
+      end, StatsN),
+    T.
+
+gc(Cutoff, Table, Id) ->
+    gc(Cutoff, lists:reverse(indexes(Table, Id)), Table, undefined).
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+format_rate_with({Table, IndexTable, _KeyIndexTable}, Id, RangePoint, Incr,
+                 Interval, Type) ->
+    format_rate_with(Table, IndexTable, Id, RangePoint, Incr, Interval, Type);
+format_rate_with(Table, Id, RangePoint, Incr, Interval, Type) ->
+    format_rate_with(Table, rabbit_mgmt_stats_tables:index(Table), Id,
+                     RangePoint, Incr, Interval, Type).
+
+format_rate_with(Table, IndexTable, Id, RangePoint, Incr, Interval, Type) ->
+    case second_largest(Table, IndexTable, Id) of
+        [S] ->
+            {_, TS} = element(1, S),
+            case TS - RangePoint of %% [0]
+                D when D =< Incr andalso D >= 0 -> {S, Interval};
+                _                               -> {S, 0.0}
+            end;
+        _ ->
+            {empty(Id, Type), 0.0}
     end.
 
 %% [0] Only display the rate if it's live - i.e. ((the end of the
@@ -85,19 +194,22 @@ format_rate(Diffs, RangePoint, Incr, Interval) ->
 %% case showing the correct instantaneous rate would be quite a faff,
 %% and probably unwanted). Why the second to last? Because data is
 %% still arriving for the last...
-nth_largest(Tree, N) ->
-    case gb_trees:is_empty(Tree) of
-        true              -> false;
-        false when N == 1 -> gb_trees:largest(Tree);
-        false             -> {_, _, Tree2} = gb_trees:take_largest(Tree),
-                             nth_largest(Tree2, N - 1)
+second_largest(Table, IndexTable, Id) ->
+    case ets:lookup(IndexTable, Id) of
+        [_, _ | _] = List ->
+            ets:lookup(Table, sl(List, {none, 0}, {none, 0}));
+        _ ->
+            unknown
     end.
 
-sum_entire_tree(Iter, Acc) ->
-    case gb_trees:next(Iter) of
-        none            -> Acc;
-        {_TS, S, Iter2} -> sum_entire_tree(Iter2, Acc + S)
-    end.
+sl([{_, TS} = H | T], {_, T1} = L1, _L2) when TS > T1 ->
+    sl(T, H, L1);
+sl([{_, TS} = H | T], L1, {_, T2}) when TS > T2 ->
+    sl(T, L1, H);
+sl([_ | T], L1, L2) ->
+    sl(T, L1, L2);
+sl([], _L1, L2) ->
+    L2.
 
 %% What we want to do here is: given the #range{}, provide a set of
 %% samples such that we definitely provide a set of samples which
@@ -105,79 +217,164 @@ sum_entire_tree(Iter, Acc) ->
 %% not have it. We need to spin up over the entire range of the
 %% samples we *do* have since they are diff-based (and we convert to
 %% absolute values here).
-extract_samples(Range = #range{first = Next}, Base, It, Samples) ->
-    case gb_trees:next(It) of
-        {TS, S, It2} -> extract_samples1(Range, Base, TS,   S, It2, Samples);
-        none         -> extract_samples1(Range, Base, Next, 0, It,  Samples)
+extract_samples(Range, Base, Table, Id, Type) ->
+    %% In order to calculate the average operation time for some of the node
+    %% metrics, it needs to carry around the last raw sample taken (before
+    %% calculations). This is the first element of the 'Samples' tuple.
+    %% It is initialised to the base, which is updated with the latest value until
+    %% it finds the first valid sample. Thus, generating an instant rate for it.
+    %% Afterwards, it will store the last raw sample.
+    extract_samples0(Range, Base, indexes(Table, Id), Table, Type,
+                     {Base, empty_list(Type)}).
+
+extract_samples0(Range = #range{first = Next}, Base, [], Table, Type, Samples) ->
+    %% [3] Empty or finished table
+    extract_samples1(Range, Base, empty({unused_id, Next}, Type), [], Table, Type,
+                     Samples);
+extract_samples0(Range, Base, [Index | List], Tab, Type, Samples) ->
+    Table = case Tab of
+               {T, _, _} ->
+                   T;
+               T ->
+                   T
+           end,
+    case ets:lookup(Table, Index) of
+        [S] ->
+            extract_samples1(Range, Base, S, List, Table, Type, Samples);
+        [] ->
+            extract_samples0(Range, Base, List, Table, Type, Samples)
     end.
 
 extract_samples1(Range = #range{first = Next, last = Last, incr = Incr},
-                 Base, TS, S, It, Samples) ->
+                 Base, S, List, Table, Type, {LastRawSample, Samples}) ->
+    {_, TS} = element(1, S),
     if
         %% We've gone over the range. Terminate.
         Next > Last ->
+            %% Drop the raw sample
             {Samples, Base};
         %% We've hit bang on a sample. Record it and move to the next.
         Next =:= TS ->
-            extract_samples(Range#range{first = Next + Incr}, Base + S, It,
-                            append(Base + S, Next, Samples));
+            %% The new base is the last sample used to generate instant rates
+            %% in the node stats
+            NewBase = add_record(Base, S),
+            extract_samples0(Range#range{first = Next + Incr}, NewBase, List,
+                             Table, Type, {NewBase, append(NewBase, Samples, Next,
+                                                           LastRawSample)});
         %% We haven't yet hit the beginning of our range.
         Next > TS ->
-            extract_samples(Range, Base + S, It, Samples);
+            NewBase = add_record(Base, S),
+            %% Roll the latest value until we find the first sample
+            RawSample = case element(2, Samples) of
+                            [] -> NewBase;
+                            _ -> LastRawSample
+                        end,
+            extract_samples0(Range, NewBase, List, Table, Type,
+                             {RawSample, Samples});
         %% We have a valid sample, but we haven't used it up
         %% yet. Append it and loop around.
         Next < TS ->
-            extract_samples1(Range#range{first = Next + Incr}, Base, TS, S, It,
-                             append(Base, Next, Samples))
+            %% Pass the last raw sample to calculate instant node stats
+            extract_samples1(Range#range{first = Next + Incr}, Base, S,
+                             List, Table, Type,
+                             {Base, append(Base, Samples, Next, LastRawSample)})
     end.
 
-append(S, TS, Samples) -> [[{sample, S}, {timestamp, TS}] | Samples].
+append({_Key, V1}, {samples, V1s}, TiS, _LastRawSample) ->
+    {samples, append_sample(V1, TiS, V1s)};
+append({_Key, V1, V2}, {samples, V1s, V2s}, TiS, _LastRawSample) ->
+    {samples, append_sample(V1, TiS, V1s), append_sample(V2, TiS, V2s)};
+append({_Key, V1, V2, V3}, {samples, V1s, V2s, V3s}, TiS, _LastRawSample) ->
+    {samples, append_sample(V1, TiS, V1s), append_sample(V2, TiS, V2s),
+     append_sample(V3, TiS, V3s)};
+append({_Key, V1, V2, V3, V4}, {samples, V1s, V2s, V3s, V4s}, TiS, _LastRawSample) ->
+    {samples, append_sample(V1, TiS, V1s), append_sample(V2, TiS, V2s),
+     append_sample(V3, TiS, V3s), append_sample(V4, TiS, V4s)};
+append({_Key, V1, V2, V3, V4, V5, V6, V7, V8},
+       {samples, V1s, V2s, V3s, V4s, V5s, V6s, V7s, V8s}, TiS, _LastRawSample) ->
+    {samples, append_sample(V1, TiS, V1s), append_sample(V2, TiS, V2s),
+     append_sample(V3, TiS, V3s), append_sample(V4, TiS, V4s),
+     append_sample(V5, TiS, V5s), append_sample(V6, TiS, V6s),
+     append_sample(V7, TiS, V7s), append_sample(V8, TiS, V8s)};
+append({_Key, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13, V14, V15,
+        V16, V17, V18, V19, V20, V21, V22, V23, V24, V25, V26, V27, V28},
+       {samples, V1s, V2s, V3s, V4s, V5s, V6s, V7s, V8s, V9s, V10s, V11s, V12s,
+        V13s, V14s, V15s, V16s, V17s, V18s, V19s, V20s, V21s, V22s, V23s, V24s,
+        V25s, V26s, V27s, V28s},
+       TiS,
+       {_, _V1r, _V2r, _V3r, _V4r, _V5r, V6r, _V7r, V8r, V9r, _V10r, V11r,
+        V12r, V13r, V14r, V15r, _V16r, _V17r, _V18r, _V19r, _V20r, _V21r,
+        _V22r, _V23r, _V24r, _V25r, _V26r, V27r, V28r}) ->
+    %% This clause covers the coarse node stats, which must calculate the average
+    %% operation times for read, write, sync and seek. These differ from any other
+%% statistic and must be calculated using the total time and counter of operations.
+    %% By calculating the new sample against the last sampled point, we provide
+    %% instant averages that truly reflect the behaviour of the system
+    %% during that space of time.
+    {samples, append_sample(V1, TiS, V1s), append_sample(V2, TiS, V2s),
+     append_sample(V3, TiS, V3s), append_sample(V4, TiS, V4s),
+     append_sample(V5, TiS, V5s), append_sample(V6, TiS, V6s),
+     append_sample(V7, TiS, V7s),
+     append_sample(avg_time(V8, V6, V8r, V6r), TiS, V8s),
+     append_sample(V9, TiS, V9s), append_sample(V10, TiS, V10s),
+     append_sample(avg_time(V11, V9, V11r, V9r), TiS, V11s),
+     append_sample(V12, TiS, V12s),
+     append_sample(avg_time(V13, V12, V13r, V12r), TiS, V13s),
+     append_sample(V14, TiS, V14s),
+     append_sample(avg_time(V15, V14, V15r, V14r), TiS, V15s),
+     append_sample(V16, TiS, V16s),
+     append_sample(V17, TiS, V17s), append_sample(V18, TiS, V18s),
+     append_sample(V19, TiS, V19s), append_sample(V20, TiS, V20s),
+     append_sample(V21, TiS, V21s), append_sample(V22, TiS, V22s),
+     append_sample(V23, TiS, V23s), append_sample(V24, TiS, V24s),
+     append_sample(V25, TiS, V25s), append_sample(V26, TiS, V26s),
+     append_sample(V27, TiS, V27s),
+     append_sample(avg_time(V28, V27, V28r, V27r), TiS, V28s)}.
 
-sum([]) -> blank();
+append_sample(S, TS, List) ->
+    [[{sample, S}, {timestamp, TS}] | List].
 
-sum([Stats | StatsN]) ->
-    lists:foldl(
-      fun (#stats{diffs = D1, base = B1}, #stats{diffs = D2, base = B2}) ->
-              #stats{diffs = add_trees(D1, gb_trees:iterator(D2)),
-                     base  = B1 + B2}
-      end, Stats, StatsN).
-
-add_trees(Tree, It) ->
-    case gb_trees:next(It) of
-        none        -> Tree;
-        {K, V, It2} -> add_trees(
-                         case gb_trees:lookup(K, Tree) of
-                             {value, V2} -> gb_trees:update(K, V + V2, Tree);
-                             none        -> gb_trees:insert(K, V, Tree)
-                         end, It2)
-    end.
+blank() ->
+    Table = ets:new(rabbit_mgmt_stats, [ordered_set, public]),
+    Index = ets:new(rabbit_mgmt_stats, [bag, public]),
+    KeyIndex = ets:new(rabbit_mgmt_stats, [ordered_set, public]),
+    {Table, Index, KeyIndex}.
 
 %%----------------------------------------------------------------------------
 %% Event-GCing
 %%----------------------------------------------------------------------------
-
-gc(Cutoff, #stats{diffs = Diffs, base = Base}) ->
-    List = lists:reverse(gb_trees:to_list(Diffs)),
-    gc(Cutoff, List, [], Base).
-
 %% Go through the list, amalgamating all too-old samples with the next
 %% newest keepable one [0] (we move samples forward in time since the
 %% semantics of a sample is "we had this many x by this time"). If the
 %% sample is too old, but would not be too old if moved to a rounder
 %% timestamp which does not exist then invent one and move it there
 %% [1]. But if it's just outright too old, move it to the base [2].
-gc(_Cutoff, [], Keep, Base) ->
-    #stats{diffs = gb_trees:from_orddict(Keep), base = Base};
-gc(Cutoff, [H = {TS, S} | T], Keep, Base) ->
-    {NewKeep, NewBase} =
-        case keep(Cutoff, TS) of
-            keep                       -> {[H | Keep],           Base};
-            drop                       -> {Keep,             S + Base}; %% [2]
-            {move, D} when Keep =:= [] -> {[{TS + D, S}],        Base}; %% [1]
-            {move, _}                  -> [{KTS, KS} | KT] = Keep,
-                                          {[{KTS, KS + S} | KT], Base}  %% [0]
-        end,
-    gc(Cutoff, T, NewKeep, NewBase).
+gc(_Cutoff, [], _Table, _Keep) ->
+    ok;
+gc(Cutoff, [Index | T], Table, Keep) ->
+    case ets:lookup(Table, Index) of
+        [S] ->
+            {Id, TS} = Key = element(1, S),
+            Keep1 = case keep(Cutoff, TS) of
+                        keep ->
+                            TS;
+                        drop -> %% [2]
+                            ets_update(Table, {Id, base}, S),
+                            ets_delete_value(Table, Key),
+                            Keep;
+                        {move, D} when Keep =:= undefined -> %% [1]
+                            ets_update(Table, {Id, TS + D}, S),
+                            ets_delete_value(Table, Key),
+                            TS + D;
+                        {move, _} -> %% [0]
+                            ets_update(Table, {Id, Keep}, S),
+                            ets_delete_value(Table, Key),
+                            Keep
+                    end,
+            gc(Cutoff, T, Table, Keep1);
+        _ ->
+            gc(Cutoff, T, Table, Keep)
+    end.
 
 keep({Policy, Now}, TS) ->
     lists:foldl(fun ({AgeSec, DivisorSec}, Action) ->
@@ -199,3 +396,583 @@ prefer_action({move, A}, {move, B}) -> {move, lists:min([A, B])};
 prefer_action({move, A},      drop) -> {move, A};
 prefer_action(drop,      {move, A}) -> {move, A};
 prefer_action(drop,           drop) -> drop.
+
+%%----------------------------------------------------------------------------
+%% ETS update
+%%----------------------------------------------------------------------------
+ets_update(Table, K, R, P, V) ->
+    try
+        ets:update_counter(Table, K, {P, V})
+    catch
+        _:_ ->
+            ets:insert(Table, new_record(K, R, P, V)),
+            insert_index(Table, K)
+    end.
+
+insert_index(Table, Key) ->
+    insert_index(rabbit_mgmt_stats_tables:index(Table),
+                 rabbit_mgmt_stats_tables:key_index(Table),
+                 Key).
+
+insert_index(_, _, {_, V}) when is_atom(V) ->
+    ok;
+insert_index(Index, KeyIndex, {Id, _TS} = Key) ->
+    ets:insert(Index, Key),
+    ets:insert(KeyIndex, {Id}).
+
+ets_update({Table, IndexTable, KeyIndexTable}, Key, Record) ->
+    try
+        ets:update_counter(Table, Key, record_to_list(Record))
+    catch
+        _:_ ->
+            ets:insert(Table, setelement(1, Record, Key)),
+            insert_index(IndexTable, KeyIndexTable, Key)
+    end;
+ets_update(Table, Key, Record) ->
+    try
+        ets:update_counter(Table, Key, record_to_list(Record))
+    catch
+        _:_ ->
+            ets:insert(Table, setelement(1, Record, Key)),
+            insert_index(Table, Key)
+    end.
+
+new_record(K, deliver_get, P, V) ->
+    setelement(P, {K, 0, 0, 0, 0}, V);
+new_record(K, fine_stats, P, V) ->
+    setelement(P, {K, 0, 0, 0, 0, 0, 0, 0, 0}, V);
+new_record(K, queue_msg_rates, P, V) ->
+    setelement(P, {K, 0, 0}, V);
+new_record(K, queue_msg_counts, P, V) ->
+    setelement(P, {K, 0, 0, 0}, V);
+new_record(K, coarse_node_stats, P, V) ->
+    setelement(P, {K, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                   0, 0, 0, 0, 0, 0, 0, 0, 0}, V);
+new_record(K, coarse_node_node_stats, P, V) ->
+    setelement(P, {K, 0, 0}, V);
+new_record(K, coarse_conn_stats, P, V) ->
+    setelement(P, {K, 0, 0}, V);
+new_record(K, process_stats, P, V) ->
+    setelement(P, {K, 0}, V).
+
+%% Returns a list of {Position, Increment} to update the current record
+record_to_list({_Key, V1}) ->
+    [{2, V1}];
+record_to_list({_Key, V1, V2}) ->
+    [{2, V1}, {3, V2}];
+record_to_list({_Key, V1, V2, V3}) ->
+    [{2, V1}, {3, V2}, {4, V3}];
+record_to_list({_Key, V1, V2, V3, V4}) ->
+    [{2, V1}, {3, V2}, {4, V3}, {5, V4}];
+record_to_list({_Key, V1, V2, V3, V4, V5, V6, V7, V8}) ->
+    [{2, V1}, {3, V2}, {4, V3}, {5, V4}, {6, V5}, {7, V6}, {8, V7}, {9, V8}];
+record_to_list({_Key, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12,
+                V13, V14, V15, V16, V17, V18, V19, V20, V21, V22, V23, V24, V25,
+                V26, V27, V28}) ->
+    [{2, V1}, {3, V2}, {4, V3}, {5, V4}, {6, V5}, {7, V6}, {8, V7}, {9, V8},
+     {10, V9}, {11, V10}, {12, V11}, {13, V12}, {14, V13}, {15, V14},
+     {16, V15}, {17, V16}, {18, V17}, {19, V18}, {20, V19}, {21, V20},
+     {22, V21}, {23, V22}, {24, V23}, {25, V24}, {26, V25}, {27, V26},
+     {28, V27}, {29, V28}].
+
+%%----------------------------------------------------------------------------
+
+get_value({Table, _, _}, Id, Tag, Type) ->
+    get_value(Table, Id, Tag, Type);
+get_value(Table, Id, Tag, Type) ->
+    Key = {Id, Tag},
+    case ets:lookup(Table, Key) of
+        [] -> empty(Key, Type);
+        [Elem] -> Elem
+    end.
+
+ets_delete_value(Table, Key) ->
+    ets:delete_object(rabbit_mgmt_stats_tables:index(Table), Key),
+    ets:delete(Table, Key).
+
+indexes({_, Index, _}, Id) ->
+    lists:sort(ets:lookup(Index, Id));
+indexes(Table, Id) ->
+    lists:sort(ets:lookup(rabbit_mgmt_stats_tables:index(Table), Id)).
+
+full_indexes(Table, Id) ->
+    full_indexes(Table, rabbit_mgmt_stats_tables:index(Table), Id).
+
+full_indexes(_Table, Index, Id) ->
+    Indexes = ets:lookup(Index, Id),
+    [{Id, base}, {Id, total} | Indexes].
+
+%%----------------------------------------------------------------------------
+%% Match specs to select or delete from the ETS tables
+%%----------------------------------------------------------------------------
+match_spec_key_index(Id) ->
+    MatchHead = {partial_match(Id)},
+    Id0 = to_simple_match_spec(Id),
+    [{MatchHead, [{'==', Id0, '$1'}],['$_']}].
+
+partial_match({_Id0, '_'}) ->
+    {'$1', '_'};
+partial_match({'_', _Id1}) ->
+    {'_', '$1'}.
+
+to_simple_match_spec({Id0, '_'}) when is_tuple(Id0) ->
+    {Id0};
+to_simple_match_spec({'_', Id1}) when is_tuple(Id1) ->
+    {Id1};
+to_simple_match_spec({Id0, '_'}) ->
+    Id0;
+to_simple_match_spec({'_', Id1}) ->
+    Id1;
+to_simple_match_spec(Id) when is_tuple(Id) ->
+    {Id};
+to_simple_match_spec(Id) ->
+    Id.
+
+to_match_condition({'_', Id1}) when is_tuple(Id1) ->
+    {'==', {Id1}, '$2'};
+to_match_condition({'_', Id1}) ->
+    {'==', Id1, '$2'};
+to_match_condition({Id0, '_'}) when is_tuple(Id0) ->
+    {'==', {Id0}, '$1'};
+to_match_condition({Id0, '_'}) ->
+    {'==', Id0, '$1'}.
+
+match_spec_keys(Id) ->
+    MatchCondition = to_match_condition(Id),
+    MatchHead = {{'$1', '$2'}},
+    [{MatchHead, [MatchCondition], [{{'$1', '$2'}}]}].
+
+%%----------------------------------------------------------------------------
+%% Format output
+%%----------------------------------------------------------------------------
+format_rate(deliver_get, {_, D, DN, G, GN}, {_, TD, TDN, TG, TGN}, Factor) ->
+    [
+     {deliver, TD}, {deliver_details, [{rate, apply_factor(D, Factor)}]},
+     {deliver_no_ack, TDN},
+     {deliver_no_ack_details, [{rate, apply_factor(DN, Factor)}]},
+     {get, TG}, {get_details, [{rate, apply_factor(G, Factor)}]},
+     {get_no_ack, TGN},
+     {get_no_ack_details, [{rate, apply_factor(GN, Factor)}]}
+    ];
+format_rate(fine_stats, {_, P, PI, PO, A, D, C, RU, R},
+            {_, TP, TPI, TPO, TA, TD, TC, TRU, TR}, Factor) ->
+    [
+     {publish, TP}, {publish_details, [{rate, apply_factor(P, Factor)}]},
+     {publish_in, TPI},
+      {publish_in_details, [{rate, apply_factor(PI, Factor)}]},
+     {publish_out, TPO},
+     {publish_out_details, [{rate, apply_factor(PO, Factor)}]},
+     {ack, TA}, {ack_details, [{rate, apply_factor(A, Factor)}]},
+     {deliver_get, TD}, {deliver_get_details, [{rate, apply_factor(D, Factor)}]},
+     {confirm, TC}, {confirm_details, [{rate, apply_factor(C, Factor)}]},
+     {return_unroutable, TRU},
+     {return_unroutable_details, [{rate, apply_factor(RU, Factor)}]},
+     {redeliver, TR}, {redeliver_details, [{rate, apply_factor(R, Factor)}]}
+    ];
+format_rate(queue_msg_rates, {_, R, W}, {_, TR, TW}, Factor) ->
+    [
+     {disk_reads, TR}, {disk_reads_details, [{rate, apply_factor(R, Factor)}]},
+     {disk_writes, TW}, {disk_writes_details, [{rate, apply_factor(W, Factor)}]}
+    ];
+format_rate(queue_msg_counts, {_, M, MR, MU}, {_, TM, TMR, TMU}, Factor) ->
+    [
+     {messages, TM},
+     {messages_details, [{rate, apply_factor(M, Factor)}]},
+     {messages_ready, TMR},
+     {messages_ready_details, [{rate, apply_factor(MR, Factor)}]},
+     {messages_unacknowledged, TMU},
+     {messages_unacknowledged_details, [{rate, apply_factor(MU, Factor)}]}
+    ];
+format_rate(coarse_node_stats,
+            {_, M, F, S, P, D, IR, IB, IA, IWC, IWB, IWAT, IS, ISAT, ISC,
+             ISEAT, IRC, MRTC, MDTC, MSRC, MSWC, QIJWC, QIWC, QIRC, GC, GCW, CS,
+             IO, IOAT},
+            {_, TM, TF, TS, TP, TD, TIR, TIB, TIA, TIWC, TIWB, TIWAT, TIS,
+             TISAT, TISC, TISEAT, TIRC, TMRTC, TMDTC, TMSRC, TMSWC, TQIJWC,
+             TQIWC, TQIRC, TGC, TGCW, TCS, TIO, TIOAT}, Factor) ->
+    %% Calculates average times for read/write/sync/seek from the
+    %% accumulated time and count
+    %% io_<op>_avg_time is the average operation time for the life of the node
+    %% io_<op>_avg_time_details/rate is the average operation time during the
+    %% last time unit calculated (thus similar to an instant rate)
+    [
+     {mem_used, TM},
+     {mem_used_details, [{rate, apply_factor(M, Factor)}]},
+     {fd_used, TF},
+     {fd_used_details, [{rate, apply_factor(F, Factor)}]},
+     {sockets_used, TS},
+     {sockets_used_details, [{rate, apply_factor(S, Factor)}]},
+     {proc_used, TP},
+     {proc_used_details, [{rate, apply_factor(P, Factor)}]},
+     {disk_free, TD},
+     {disk_free_details, [{rate, apply_factor(D, Factor)}]},
+     {io_read_count, TIR},
+     {io_read_count_details, [{rate, apply_factor(IR, Factor)}]},
+     {io_read_bytes, TIB},
+     {io_read_bytes_details, [{rate, apply_factor(IB, Factor)}]},
+     {io_read_avg_time, avg_time(TIA, TIR)},
+     {io_read_avg_time_details, [{rate, avg_time(IA, IR)}]},
+     {io_write_count, TIWC},
+     {io_write_count_details, [{rate, apply_factor(IWC, Factor)}]},
+     {io_write_bytes, TIWB},
+     {io_write_bytes_details, [{rate, apply_factor(IWB, Factor)}]},
+     {io_write_avg_time, avg_time(TIWAT, TIWC)},
+     {io_write_avg_time_details, [{rate, avg_time(IWAT, IWC)}]},
+     {io_sync_count, TIS},
+     {io_sync_count_details, [{rate, apply_factor(IS, Factor)}]},
+     {io_sync_avg_time, avg_time(TISAT, TIS)},
+     {io_sync_avg_time_details, [{rate, avg_time(ISAT, IS)}]},
+     {io_seek_count, TISC},
+     {io_seek_count_details, [{rate, apply_factor(ISC, Factor)}]},
+     {io_seek_avg_time, avg_time(TISEAT, TISC)},
+     {io_seek_avg_time_details, [{rate, avg_time(ISEAT, ISC)}]},
+     {io_reopen_count, TIRC},
+     {io_reopen_count_details, [{rate, apply_factor(IRC, Factor)}]},
+     {mnesia_ram_tx_count, TMRTC},
+     {mnesia_ram_tx_count_details, [{rate, apply_factor(MRTC, Factor)}]},
+     {mnesia_disk_tx_count, TMDTC},
+     {mnesia_disk_tx_count_details, [{rate, apply_factor(MDTC, Factor)}]},
+     {msg_store_read_count, TMSRC},
+     {msg_store_read_count_details, [{rate, apply_factor(MSRC, Factor)}]},
+     {msg_store_write_count, TMSWC},
+     {msg_store_write_count_details, [{rate, apply_factor(MSWC, Factor)}]},
+     {queue_index_journal_write_count, TQIJWC},
+     {queue_index_journal_write_count_details, [{rate, apply_factor(QIJWC, Factor)}]},
+     {queue_index_write_count, TQIWC},
+     {queue_index_write_count_details, [{rate, apply_factor(QIWC, Factor)}]},
+     {queue_index_read_count, TQIRC},
+     {queue_index_read_count_details, [{rate, apply_factor(QIRC, Factor)}]},
+     {gc_num, TGC},
+     {gc_num_details, [{rate, apply_factor(GC, Factor)}]},
+     {gc_bytes_reclaimed, TGCW},
+     {gc_bytes_reclaimed_details, [{rate, apply_factor(GCW, Factor)}]},
+     {context_switches, TCS},
+     {context_switches_details, [{rate, apply_factor(CS, Factor)}]},
+     {io_file_handle_open_attempt_count, TIO},
+     {io_file_handle_open_attempt_count_details, [{rate, apply_factor(IO, Factor)}]},
+     {io_file_handle_open_attempt_avg_time, avg_time(TIOAT, TIO)},
+     {io_file_handle_open_attempt_avg_time_details, [{rate, avg_time(IOAT, IO)}]}
+    ];
+format_rate(coarse_node_node_stats, {_, S, R}, {_, TS, TR}, Factor) ->
+    [
+     {send_bytes, TS},
+     {send_bytes_details, [{rate, apply_factor(S, Factor)}]},
+     {recv_bytes, TR},
+     {recv_bytes_details, [{rate, apply_factor(R, Factor)}]}
+    ];
+format_rate(coarse_conn_stats, {_, R, S}, {_, TR, TS}, Factor) ->
+    [
+     {send_oct, TS},
+     {send_oct_details, [{rate, apply_factor(S, Factor)}]},
+     {recv_oct, TR},
+     {recv_oct_details, [{rate, apply_factor(R, Factor)}]}
+    ];
+format_rate(process_stats, {_, R}, {_, TR}, Factor) ->
+    [
+     {reductions, TR},
+     {reductions_details, [{rate, apply_factor(R, Factor)}]}
+    ].
+
+format_rate(deliver_get, {_, D, DN, G, GN}, {_, TD, TDN, TG, TGN},
+            {_, SD, SDN, SG, SGN}, Factor) ->
+    Length = length(SD),
+    [
+     {deliver, TD}, {deliver_details, [{rate, apply_factor(D, Factor)},
+                                       {samples, SD}] ++ average(SD, Length)},
+     {deliver_no_ack, TDN},
+     {deliver_no_ack_details, [{rate, apply_factor(DN, Factor)},
+                               {samples, SDN}] ++ average(SDN, Length)},
+     {get, TG}, {get_details, [{rate, apply_factor(G, Factor)},
+                               {samples, SG}] ++ average(SG, Length)},
+     {get_no_ack, TGN},
+     {get_no_ack_details, [{rate, apply_factor(GN, Factor)},
+                           {samples, SGN}] ++ average(SGN, Length)}
+    ];
+format_rate(fine_stats, {_, P, PI, PO, A, D, C, RU, R},
+            {_, TP, TPI, TPO, TA, TD, TC, TRU, TR},
+            {_, SP, SPI, SPO, SA, SD, SC, SRU, SR}, Factor) ->
+    Length = length(SP),
+    [
+     {publish, TP},
+     {publish_details, [{rate, apply_factor(P, Factor)},
+                        {samples, SP}] ++ average(SP, Length)},
+     {publish_in, TPI},
+     {publish_in_details, [{rate, apply_factor(PI, Factor)},
+                           {samples, SPI}] ++ average(SPI, Length)},
+     {publish_out, TPO},
+     {publish_out_details, [{rate, apply_factor(PO, Factor)},
+                            {samples, SPO}] ++ average(SPO, Length)},
+     {ack, TA}, {ack_details, [{rate, apply_factor(A, Factor)},
+                               {samples, SA}] ++ average(SA, Length)},
+     {deliver_get, TD},
+     {deliver_get_details, [{rate, apply_factor(D, Factor)},
+                            {samples, SD}] ++ average(SD, Length)},
+     {confirm, TC},
+     {confirm_details, [{rate, apply_factor(C, Factor)},
+                        {samples, SC}] ++ average(SC, Length)},
+     {return_unroutable, TRU},
+     {return_unroutable_details, [{rate, apply_factor(RU, Factor)},
+                                  {samples, SRU}] ++ average(SRU, Length)},
+     {redeliver, TR},
+     {redeliver_details, [{rate, apply_factor(R, Factor)},
+                          {samples, SR}] ++ average(SR, Length)}
+    ];
+format_rate(queue_msg_rates, {_, R, W}, {_, TR, TW}, {_, SR, SW}, Factor) ->
+    Length = length(SR),
+    [
+     {disk_reads, TR},
+     {disk_reads_details, [{rate, apply_factor(R, Factor)},
+                           {samples, SR}] ++ average(SR, Length)},
+     {disk_writes, TW},
+     {disk_writes_details, [{rate, apply_factor(W, Factor)},
+                            {samples, SW}] ++ average(SW, Length)}
+    ];
+format_rate(queue_msg_counts, {_, M, MR, MU}, {_, TM, TMR, TMU},
+            {_, SM, SMR, SMU}, Factor) ->
+    Length = length(SM),
+    [
+     {messages, TM},
+     {messages_details, [{rate, apply_factor(M, Factor)},
+                         {samples, SM}] ++ average(SM, Length)},
+     {messages_ready, TMR},
+     {messages_ready_details, [{rate, apply_factor(MR, Factor)},
+                               {samples, SMR}] ++ average(SMR, Length)},
+     {messages_unacknowledged, TMU},
+     {messages_unacknowledged_details, [{rate, apply_factor(MU, Factor)},
+                                        {samples, SMU}] ++ average(SMU, Length)}
+    ];
+format_rate(coarse_node_stats,
+            {_, M, F, S, P, D, IR, IB, IA, IWC, IWB, IWAT, IS, ISAT, ISC,
+             ISEAT, IRC, MRTC, MDTC, MSRC, MSWC, QIJWC, QIWC, QIRC, GC, GCW, CS,
+             IO, IOAT},
+            {_, TM, TF, TS, TP, TD, TIR, TIB, TIA, TIWC, TIWB, TIWAT, TIS,
+             TISAT, TISC, TISEAT, TIRC, TMRTC, TMDTC, TMSRC, TMSWC, TQIJWC,
+             TQIWC, TQIRC, TGC, TGCW, TCS, TIO, TIOAT},
+            {_, SM, SF, SS, SP, SD, SIR, SIB, SIA, SIWC, SIWB, SIWAT, SIS,
+             SISAT, SISC, SISEAT, SIRC, SMRTC, SMDTC, SMSRC, SMSWC, SQIJWC,
+             SQIWC, SQIRC, SGC, SGCW, SCS, SIO, SIOAT}, Factor) ->
+    %% Calculates average times for read/write/sync/seek from the
+    %% accumulated time and count.
+    %% io_<op>_avg_time is the average operation time for the life of the node.
+    %% io_<op>_avg_time_details/rate is the average operation time during the
+    %% last time unit calculated (thus similar to an instant rate).
+    %% io_<op>_avg_time_details/samples contain the average operation time
+    %% during each time unit requested.
+    %% io_<op>_avg_time_details/avg_rate is meaningless here, but we keep it
+    %% to maintain an uniform API with all the other metrics.
+    %% io_<op>_avg_time_details/avg is the average of the samples taken over
+    %% the requested period of time.
+    Length = length(SM),
+    [
+     {mem_used, TM},
+     {mem_used_details, [{rate, apply_factor(M, Factor)},
+                         {samples, SM}] ++ average(SM, Length)},
+     {fd_used, TF},
+     {fd_used_details, [{rate, apply_factor(F, Factor)},
+                        {samples, SF}] ++ average(SF, Length)},
+     {sockets_used, TS},
+     {sockets_used_details, [{rate, apply_factor(S, Factor)},
+                             {samples, SS}] ++ average(SS, Length)},
+     {proc_used, TP},
+     {proc_used_details, [{rate, apply_factor(P, Factor)},
+                          {samples, SP}] ++ average(SP, Length)},
+     {disk_free, TD},
+     {disk_free_details, [{rate, apply_factor(D, Factor)},
+                          {samples, SD}] ++ average(SD, Length)},
+     {io_read_count, TIR},
+     {io_read_count_details, [{rate, apply_factor(IR, Factor)},
+                              {samples, SIR}] ++ average(SIR, Length)},
+     {io_read_bytes, TIB},
+     {io_read_bytes_details, [{rate, apply_factor(IB, Factor)},
+                              {samples, SIB}] ++ average(SIB, Length)},
+     {io_read_avg_time, avg_time(TIA, TIR)},
+     {io_read_avg_time_details, [{rate, avg_time(IA, IR)},
+                                 {samples, SIA}] ++ average(SIA, Length)},
+     {io_write_count, TIWC},
+     {io_write_count_details, [{rate, apply_factor(IWC, Factor)},
+                               {samples, SIWC}] ++ average(SIWC, Length)},
+     {io_write_bytes, TIWB},
+     {io_write_bytes_details, [{rate, apply_factor(IWB, Factor)},
+                               {samples, SIWB}] ++ average(SIWB, Length)},
+     {io_write_avg_time, avg_time(TIWAT, TIWC)},
+     {io_write_avg_time_details, [{rate, avg_time(IWAT, IWC)},
+                                  {samples, SIWAT}] ++ average(SIWAT, Length)},
+     {io_sync_count, TIS},
+     {io_sync_count_details, [{rate, apply_factor(IS, Factor)},
+                              {samples, SIS}] ++ average(SIS, Length)},
+     {io_sync_avg_time, avg_time(TISAT, TIS)},
+     {io_sync_avg_time_details, [{rate, avg_time(ISAT, IS)},
+                                 {samples, SISAT}] ++ average(SISAT, Length)},
+     {io_seek_count, TISC},
+     {io_seek_count_details, [{rate, apply_factor(ISC, Factor)},
+                              {samples, SISC}] ++ average(SISC, Length)},
+     {io_seek_avg_time, avg_time(TISEAT, TISC)},
+     {io_seek_avg_time_details, [{rate, avg_time(ISEAT, ISC)},
+                                 {samples, SISEAT}] ++ average(SISEAT, Length)},
+     {io_reopen_count, TIRC},
+     {io_reopen_count_details, [{rate, apply_factor(IRC, Factor)},
+                                {samples, SIRC}] ++ average(SIRC, Length)},
+     {mnesia_ram_tx_count, TMRTC},
+     {mnesia_ram_tx_count_details, [{rate, apply_factor(MRTC, Factor)},
+                                    {samples, SMRTC}] ++ average(SMRTC, Length)},
+     {mnesia_disk_tx_count, TMDTC},
+     {mnesia_disk_tx_count_details, [{rate, apply_factor(MDTC, Factor)},
+                                     {samples, SMDTC}] ++ average(SMDTC, Length)},
+     {msg_store_read_count, TMSRC},
+     {msg_store_read_count_details, [{rate, apply_factor(MSRC, Factor)},
+                                     {samples, SMSRC}] ++ average(SMSRC, Length)},
+     {msg_store_write_count, TMSWC},
+     {msg_store_write_count_details, [{rate, apply_factor(MSWC, Factor)},
+                                      {samples, SMSWC}] ++ average(SMSWC, Length)},
+     {queue_index_journal_write_count, TQIJWC},
+     {queue_index_journal_write_count_details,
+      [{rate, apply_factor(QIJWC, Factor)},
+       {samples, SQIJWC}] ++ average(SQIJWC, Length)},
+     {queue_index_write_count, TQIWC},
+     {queue_index_write_count_details, [{rate, apply_factor(QIWC, Factor)},
+                                        {samples, SQIWC}] ++ average(SQIWC, Length)},
+     {queue_index_read_count, TQIRC},
+     {queue_index_read_count_details, [{rate, apply_factor(QIRC, Factor)},
+                                       {samples, SQIRC}] ++ average(SQIRC, Length)},
+     {gc_num, TGC},
+     {gc_num_details, [{rate, apply_factor(GC, Factor)},
+                       {samples, SGC}] ++ average(SGC, Length)},
+     {gc_bytes_reclaimed, TGCW},
+     {gc_bytes_reclaimed_details, [{rate, apply_factor(GCW, Factor)},
+                                   {samples, SGCW}] ++ average(SGCW, Length)},
+     {context_switches, TCS},
+     {context_switches_details, [{rate, apply_factor(CS, Factor)},
+                                 {samples, SCS}] ++ average(SCS, Length)},
+     {io_file_handle_open_attempt_count, TIO},
+     {io_file_handle_open_attempt_count_details,
+      [{rate, apply_factor(IO, Factor)},
+       {samples, SIO}] ++ average(SIO, Length)},
+     {io_file_handle_open_attempt_avg_time, avg_time(TIOAT, TIO)},
+     {io_file_handle_open_attempt_avg_time_details,
+      [{rate, avg_time(IOAT, IO)},
+       {samples, SIOAT}] ++ average(SIOAT, Length)}
+    ];
+format_rate(coarse_node_node_stats, {_, S, R}, {_, TS, TR}, {_, SS, SR},
+            Factor) ->
+    Length = length(SS),
+    [
+     {send_bytes, TS},
+     {send_bytes_details, [{rate, apply_factor(S, Factor)},
+                           {samples, SS}] ++ average(SS, Length)},
+     {recv_bytes, TR},
+     {recv_bytes_details, [{rate, apply_factor(R, Factor)},
+                           {samples, SR}] ++ average(SR, Length)}
+    ];
+format_rate(coarse_conn_stats, {_, R, S}, {_, TR, TS}, {_, SR, SS},
+            Factor) ->
+    Length = length(SS),
+    [
+     {send_oct, TS},
+     {send_oct_details, [{rate, apply_factor(S, Factor)},
+                         {samples, SS}] ++ average(SS, Length)},
+     {recv_oct, TR},
+     {recv_oct_details, [{rate, apply_factor(R, Factor)},
+                         {samples, SR}] ++ average(SR, Length)}
+    ];
+format_rate(process_stats, {_, R}, {_, TR}, {_, SR}, Factor) ->
+    Length = length(SR),
+    [
+     {reductions, TR},
+     {reductions_details, [{rate, apply_factor(R, Factor)},
+                           {samples, SR}] ++ average(SR, Length)}
+    ].
+
+apply_factor(_, 0.0) ->
+    0.0;
+apply_factor(S, Factor) ->
+    S * 1000 / Factor.
+
+average(_Samples, Length) when Length =< 1 ->
+    [];
+average(Samples, Length) ->
+    [{sample, S2}, {timestamp, T2}] = hd(Samples),
+    [{sample, S1}, {timestamp, T1}] = lists:last(Samples),
+    Total = lists:sum([pget(sample, I) || I <- Samples]),
+    [{avg_rate, (S2 - S1) * 1000 / (T2 - T1)},
+     {avg,      Total / Length}].
+%%----------------------------------------------------------------------------
+
+add_record({Base, V1}, {_, V11}) ->
+    {Base, V1 + V11};
+add_record({Base, V1, V2}, {_, V11, V21}) ->
+    {Base, V1 + V11, V2 + V21};
+add_record({Base, V1, V2, V3}, {_, V1a, V2a, V3a}) ->
+    {Base, V1 + V1a, V2 + V2a, V3 + V3a};
+add_record({Base, V1, V2, V3, V4}, {_, V1a, V2a, V3a, V4a}) ->
+    {Base, V1 + V1a, V2 + V2a, V3 + V3a, V4 + V4a};
+add_record({Base, V1, V2, V3, V4, V5, V6, V7, V8},
+           {_, V1a, V2a, V3a, V4a, V5a, V6a, V7a, V8a}) ->
+    {Base, V1 + V1a, V2 + V2a, V3 + V3a, V4 + V4a, V5 + V5a, V6 + V6a, V7 + V7a,
+     V8 + V8a};
+add_record({Base, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13, V14,
+            V15, V16, V17, V18, V19, V20, V21, V22, V23, V24, V25, V26, V27, V28},
+           {_, V1a, V2a, V3a, V4a, V5a, V6a, V7a, V8a, V9a, V10a, V11a, V12a,
+            V13a, V14a, V15a, V16a, V17a, V18a, V19a, V20a, V21a, V22a, V23a,
+            V24a, V25a, V26a, V27a, V28a}) ->
+    {Base, V1 + V1a, V2 + V2a, V3 + V3a, V4 + V4a, V5 + V5a, V6 + V6a, V7 + V7a,
+     V8 + V8a, V9 + V9a, V10 + V10a, V11 + V11a, V12 + V12a, V13 + V13a,
+     V14 + V14a, V15 + V15a, V16 + V16a, V17 + V17a, V18 + V18a, V19 + V19a,
+     V20 + V20a, V21 + V21a, V22 + V22a, V23 + V23a, V24 + V24a, V25 + V25a,
+     V26 + V26a, V27 + V27a, V28 + V28a}.
+
+empty(Key, process_stats) ->
+    {Key, 0};
+empty(Key, Type) when Type == queue_msg_rates;
+                      Type == coarse_node_node_stats;
+                      Type == coarse_conn_stats ->
+    {Key, 0, 0};
+empty(Key, queue_msg_counts) ->
+    {Key, 0, 0, 0};
+empty(Key, deliver_get) ->
+    {Key, 0, 0, 0, 0};
+empty(Key, fine_stats) ->
+    {Key, 0, 0, 0, 0, 0, 0, 0, 0};
+empty(Key, coarse_node_stats) ->
+    {Key, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+     0, 0, 0, 0, 0}.
+
+empty_list(process_stats) ->
+    {samples, []};
+empty_list(Type) when Type == queue_msg_rates;
+                      Type == coarse_node_node_stats;
+                      Type == coarse_conn_stats ->
+    {samples, [], []};
+empty_list(queue_msg_counts) ->
+    {samples, [], [], []};
+empty_list(deliver_get) ->
+    {samples, [], [], [], []};
+empty_list(fine_stats) ->
+    {samples, [], [], [], [], [], [], [], []};
+empty_list(coarse_node_stats) ->
+    {samples, [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [],
+     [], [], [], [], [], [], [], [], [], [], [], []}.
+
+
+is_blank({_Key, 0}) ->
+    true;
+is_blank({_Key, 0, 0}) ->
+    true;
+is_blank({_Key, 0, 0, 0}) ->
+    true;
+is_blank({_Key, 0, 0, 0, 0}) ->
+    true;
+is_blank({_Key, 0, 0, 0, 0, 0, 0, 0, 0}) ->
+    true;
+is_blank({_Key, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+          0, 0, 0, 0, 0, 0, 0}) ->
+    true;
+is_blank(_) ->
+    false.
+
+avg_time(_Total, 0) ->
+    0.0;
+avg_time(Total, Count) ->
+    (Total / Count) / ?MICRO_TO_MILLI.
+
+avg_time(Total, Count, BaseTotal, BaseCount) ->
+    avg_time(Total - BaseTotal, Count - BaseCount).
diff --git a/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_stats_gc.erl b/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_stats_gc.erl
new file mode 100644 (file)
index 0000000..44b5277
--- /dev/null
@@ -0,0 +1,219 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ.
+%%
+%%   The Initial Developer of the Original Code is Pivotal Software, Inc.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_stats_gc).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include("rabbit_mgmt_metrics.hrl").
+
+-behaviour(gen_server2).
+
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+         code_change/3, handle_pre_hibernate/1]).
+
+-export([name/1]).
+
+-import(rabbit_misc, [pget/3]).
+-import(rabbit_mgmt_db, [pget/2, id_name/1, id/2, lookup_element/2]).
+
+-record(state, {
+          interval,
+          gc_timer,
+          gc_table,
+          gc_index,
+          gc_next_key
+         }).
+
+-define(GC_INTERVAL, 5000).
+-define(GC_MIN_ROWS, 50).
+-define(GC_MIN_RATIO, 0.001).
+
+-define(DROP_LENGTH, 1000).
+
+-define(PROCESS_ALIVENESS_TIMEOUT, 15000).
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+start_link(Table) ->
+    case gen_server2:start_link({global, name(Table)}, ?MODULE, [Table], []) of
+        {ok, Pid} -> register(name(Table), Pid), %% [1]
+                     {ok, Pid};
+        Else      -> Else
+    end.
+%% [1] For debugging it's helpful to locally register the name too
+%% since that shows up in places global names don't.
+
+%%----------------------------------------------------------------------------
+%% Internal, gen_server2 callbacks
+%%----------------------------------------------------------------------------
+
+init([Table]) ->
+    {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
+    rabbit_log:info("Statistics garbage collector started for table ~p with interval ~p.~n", [Table, Interval]),
+    {ok, set_gc_timer(#state{interval = Interval,
+                             gc_table = Table,
+                             gc_index = rabbit_mgmt_stats_tables:key_index(Table)}),
+     hibernate,
+     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+handle_call(_Request, _From, State) ->
+    reply(not_understood, State).
+
+handle_cast(_Request, State) ->
+    noreply(State).
+
+handle_info(gc, State) ->
+    noreply(set_gc_timer(gc_batch(State)));
+
+handle_info(_Info, State) ->
+    noreply(State).
+
+terminate(_Arg, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+reply(Reply, NewState) -> {reply, Reply, NewState, hibernate}.
+noreply(NewState) -> {noreply, NewState, hibernate}.
+
+set_gc_timer(State) ->
+    TRef = erlang:send_after(?GC_INTERVAL, self(), gc),
+    State#state{gc_timer = TRef}.
+
+handle_pre_hibernate(State) ->
+    {hibernate, State}.
+
+%%----------------------------------------------------------------------------
+%% Internal, utilities
+%%----------------------------------------------------------------------------
+
+floor(TS, #state{interval = Interval}) ->
+    rabbit_mgmt_util:floor(TS, Interval).
+
+%%----------------------------------------------------------------------------
+%% Internal, event-GCing
+%%----------------------------------------------------------------------------
+
+gc_batch(#state{gc_index = Index} = State) ->
+    {ok, Policies} = application:get_env(
+                       rabbitmq_management, sample_retention_policies),
+    {ok, ProcGCTimeout} = application:get_env(
+                            rabbitmq_management, process_stats_gc_timeout),
+    Config = [{policies, Policies}, {process_stats_gc_timeout, ProcGCTimeout}],
+    Total = ets:info(Index, size),
+    Rows = erlang:max(erlang:min(Total, ?GC_MIN_ROWS), round(?GC_MIN_RATIO * Total)),
+    gc_batch(Rows, Config, State).
+
+gc_batch(0, _Config, State) ->
+    State;
+gc_batch(Rows, Config, State = #state{gc_next_key = Cont,
+                                        gc_table = Table,
+                                        gc_index = Index}) ->
+    Select = case Cont of
+                 undefined ->
+                     ets:first(Index);
+                 _ ->
+                     ets:next(Index, Cont)
+             end,
+    NewCont = case Select of
+                  '$end_of_table' ->
+                      undefined;
+                  Key ->
+                      Now = floor(
+                              time_compat:os_system_time(milli_seconds),
+                              State),
+                      gc(Key, Table, Config, Now),
+                      Key
+              end,
+    gc_batch(Rows - 1, Config, State#state{gc_next_key = NewCont}).
+
+gc(Key, Table, Config, Now) ->
+    case lists:member(Table, ?PROC_STATS_TABLES) of
+        true  -> gc_proc(Key, Table, Config, Now);
+        false -> gc_aggr(Key, Table, Config, Now)
+    end.
+
+gc_proc(Key, Table, Config, Now) when Table == connection_stats;
+                                 Table == channel_stats ->
+    Timeout = pget(process_stats_gc_timeout, Config),
+    case ets:lookup(Table, {Key, stats}) of
+        %% Key is already cleared. Skipping
+        []                           -> ok;
+        [{{Key, stats}, _Stats, TS}] -> maybe_gc_process(Key, Table,
+                                                         TS, Now, Timeout)
+    end.
+
+gc_aggr(Key, Table, Config, Now) ->
+    Policies = pget(policies, Config),
+    Policy   = pget(retention_policy(Table), Policies),
+    rabbit_mgmt_stats:gc({Policy, Now}, Table, Key).
+
+maybe_gc_process(Pid, Table, LastStatsTS, Now, Timeout) ->
+    case Now - LastStatsTS < Timeout of
+        true  -> ok;
+        false ->
+            case process_status(Pid) of
+                %% Process doesn't exist on remote node
+                undefined -> rabbit_event:notify(deleted_event(Table),
+                                                 [{pid, Pid}]);
+                %% Remote node is unreachable or process is alive
+                _        -> ok
+            end
+    end.
+
+process_status(Pid) when node(Pid) =:= node() ->
+    process_info(Pid, status);
+process_status(Pid) ->
+    rpc:block_call(node(Pid), erlang, process_info, [Pid, status],
+                   ?PROCESS_ALIVENESS_TIMEOUT).
+
+deleted_event(channel_stats)    -> channel_closed;
+deleted_event(connection_stats) -> connection_closed.
+
+retention_policy(aggr_node_stats_coarse_node_stats) -> global;
+retention_policy(aggr_node_node_stats_coarse_node_node_stats) -> global;
+retention_policy(aggr_vhost_stats_deliver_get) -> global;
+retention_policy(aggr_vhost_stats_fine_stats) -> global;
+retention_policy(aggr_vhost_stats_queue_msg_rates) -> global;
+retention_policy(aggr_vhost_stats_msg_rates_details) -> global;
+retention_policy(aggr_vhost_stats_queue_msg_counts) -> global;
+retention_policy(aggr_vhost_stats_coarse_conn_stats) -> global;
+retention_policy(aggr_queue_stats_fine_stats) -> basic;
+retention_policy(aggr_queue_stats_deliver_get) -> basic;
+retention_policy(aggr_queue_stats_queue_msg_counts) -> basic;
+retention_policy(aggr_queue_stats_queue_msg_rates) -> basic;
+retention_policy(aggr_queue_stats_process_stats) -> basic;
+retention_policy(aggr_exchange_stats_fine_stats) -> basic;
+retention_policy(aggr_connection_stats_coarse_conn_stats) -> basic;
+retention_policy(aggr_connection_stats_process_stats) -> basic;
+retention_policy(aggr_channel_stats_deliver_get) -> basic;
+retention_policy(aggr_channel_stats_fine_stats) -> basic;
+retention_policy(aggr_channel_stats_queue_msg_counts) -> basic;
+retention_policy(aggr_channel_stats_process_stats) -> basic;
+retention_policy(aggr_queue_exchange_stats_fine_stats)   -> detailed;
+retention_policy(aggr_channel_exchange_stats_deliver_get) -> detailed;
+retention_policy(aggr_channel_exchange_stats_fine_stats) -> detailed;
+retention_policy(aggr_channel_queue_stats_deliver_get) -> detailed;
+retention_policy(aggr_channel_queue_stats_fine_stats) -> detailed;
+retention_policy(aggr_channel_queue_stats_queue_msg_counts) -> detailed.
+
+name(Atom) ->
+    list_to_atom((atom_to_list(Atom) ++ "_gc")).
diff --git a/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_stats_tables.erl b/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_stats_tables.erl
new file mode 100644 (file)
index 0000000..2505280
--- /dev/null
@@ -0,0 +1,271 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ.
+%%
+%%   The Initial Developer of the Original Code is Pivotal Software, Inc.
+%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_stats_tables).
+
+-include("rabbit_mgmt_metrics.hrl").
+
+-export([aggr_table/2, aggr_tables/1, type_from_table/1,
+         index/1, key_index/1]).
+
+-spec aggr_table(event_type(), type()) -> table_name(). % ETS table that aggregates one stats group for one event type
+aggr_table(queue_stats, deliver_get) ->
+    aggr_queue_stats_deliver_get;
+aggr_table(queue_stats, fine_stats) ->
+    aggr_queue_stats_fine_stats;
+aggr_table(queue_stats, queue_msg_counts) ->
+    aggr_queue_stats_queue_msg_counts;
+aggr_table(queue_stats, queue_msg_rates) ->
+    aggr_queue_stats_queue_msg_rates;
+aggr_table(queue_stats, process_stats) ->
+    aggr_queue_stats_process_stats;
+aggr_table(queue_exchange_stats, fine_stats) ->
+    aggr_queue_exchange_stats_fine_stats;
+aggr_table(vhost_stats, deliver_get) ->
+    aggr_vhost_stats_deliver_get;
+aggr_table(vhost_stats, fine_stats) ->
+    aggr_vhost_stats_fine_stats;
+aggr_table(vhost_stats, queue_msg_rates) ->
+    aggr_vhost_stats_queue_msg_rates;
+aggr_table(vhost_stats, queue_msg_counts) ->
+    aggr_vhost_stats_queue_msg_counts;
+aggr_table(vhost_stats, coarse_conn_stats) ->
+    aggr_vhost_stats_coarse_conn_stats;
+aggr_table(channel_queue_stats, deliver_get) ->
+    aggr_channel_queue_stats_deliver_get;
+aggr_table(channel_queue_stats, fine_stats) ->
+    aggr_channel_queue_stats_fine_stats;
+aggr_table(channel_queue_stats, queue_msg_counts) ->
+    aggr_channel_queue_stats_queue_msg_counts;
+aggr_table(channel_stats, deliver_get) ->
+    aggr_channel_stats_deliver_get;
+aggr_table(channel_stats, fine_stats) ->
+    aggr_channel_stats_fine_stats;
+aggr_table(channel_stats, queue_msg_counts) ->
+    aggr_channel_stats_queue_msg_counts;
+aggr_table(channel_stats, process_stats) ->
+    aggr_channel_stats_process_stats;
+aggr_table(channel_exchange_stats, deliver_get) ->
+    aggr_channel_exchange_stats_deliver_get;
+aggr_table(channel_exchange_stats, fine_stats) ->
+    aggr_channel_exchange_stats_fine_stats;
+aggr_table(exchange_stats, fine_stats) ->
+    aggr_exchange_stats_fine_stats;
+aggr_table(node_stats, coarse_node_stats) ->
+    aggr_node_stats_coarse_node_stats;
+aggr_table(node_node_stats, coarse_node_node_stats) ->
+    aggr_node_node_stats_coarse_node_node_stats;
+aggr_table(connection_stats, coarse_conn_stats) ->
+    aggr_connection_stats_coarse_conn_stats;
+aggr_table(connection_stats, process_stats) ->
+    aggr_connection_stats_process_stats.
+
+-spec aggr_tables(event_type()) -> [{table_name(), type()}]. % all {table, stats group} pairs kept for an event type (inverse view of aggr_table/2)
+aggr_tables(queue_stats) ->
+    [{aggr_queue_stats_fine_stats, fine_stats},
+     {aggr_queue_stats_deliver_get, deliver_get},
+     {aggr_queue_stats_queue_msg_counts, queue_msg_counts},
+     {aggr_queue_stats_queue_msg_rates, queue_msg_rates},
+     {aggr_queue_stats_process_stats, process_stats}];
+aggr_tables(queue_exchange_stats) ->
+    [{aggr_queue_exchange_stats_fine_stats, fine_stats}];
+aggr_tables(vhost_stats) ->
+    [{aggr_vhost_stats_deliver_get, deliver_get},
+     {aggr_vhost_stats_fine_stats, fine_stats},
+     {aggr_vhost_stats_queue_msg_rates, queue_msg_rates},
+     {aggr_vhost_stats_queue_msg_counts, queue_msg_counts},
+     {aggr_vhost_stats_coarse_conn_stats, coarse_conn_stats}];
+aggr_tables(channel_queue_stats) ->
+    [{aggr_channel_queue_stats_deliver_get, deliver_get},
+     {aggr_channel_queue_stats_fine_stats, fine_stats},
+     {aggr_channel_queue_stats_queue_msg_counts, queue_msg_counts}];
+aggr_tables(channel_stats) ->
+    [{aggr_channel_stats_deliver_get, deliver_get},
+     {aggr_channel_stats_fine_stats, fine_stats},
+     {aggr_channel_stats_queue_msg_counts, queue_msg_counts},
+     {aggr_channel_stats_process_stats, process_stats}];
+aggr_tables(channel_exchange_stats) ->
+    [{aggr_channel_exchange_stats_deliver_get, deliver_get},
+     {aggr_channel_exchange_stats_fine_stats, fine_stats}];
+aggr_tables(exchange_stats) ->
+    [{aggr_exchange_stats_fine_stats, fine_stats}];
+aggr_tables(node_stats) ->
+    [{aggr_node_stats_coarse_node_stats, coarse_node_stats}];
+aggr_tables(node_node_stats) ->
+    [{aggr_node_node_stats_coarse_node_node_stats, coarse_node_node_stats}];
+aggr_tables(connection_stats) ->
+    [{aggr_connection_stats_coarse_conn_stats, coarse_conn_stats},
+     {aggr_connection_stats_process_stats, process_stats}].
+
+-spec type_from_table(table_name()) -> type().  % recovers the stats group from an aggregated table name
+type_from_table(aggr_queue_stats_deliver_get) ->
+    deliver_get;
+type_from_table(aggr_queue_stats_fine_stats) ->
+    fine_stats;
+type_from_table(aggr_queue_stats_queue_msg_counts) ->
+    queue_msg_counts;
+type_from_table(aggr_queue_stats_queue_msg_rates) ->
+    queue_msg_rates;
+type_from_table(aggr_queue_stats_process_stats) ->
+    process_stats;
+type_from_table(aggr_queue_exchange_stats_fine_stats) ->
+    fine_stats;
+type_from_table(aggr_vhost_stats_deliver_get) ->
+    deliver_get;
+type_from_table(aggr_vhost_stats_fine_stats) ->
+    fine_stats;
+type_from_table(aggr_vhost_stats_queue_msg_rates) ->
+    queue_msg_rates;
+type_from_table(aggr_vhost_stats_queue_msg_counts) ->
+    queue_msg_counts;
+type_from_table(aggr_vhost_stats_coarse_conn_stats) ->
+    coarse_conn_stats;
+type_from_table(aggr_channel_queue_stats_deliver_get) ->
+    deliver_get;
+type_from_table(aggr_channel_queue_stats_fine_stats) ->
+    fine_stats;
+type_from_table(aggr_channel_queue_stats_queue_msg_counts) ->
+    queue_msg_counts;
+type_from_table(aggr_channel_stats_deliver_get) ->
+    deliver_get;
+type_from_table(aggr_channel_stats_fine_stats) ->
+    fine_stats;
+type_from_table(aggr_channel_stats_queue_msg_counts) ->
+    queue_msg_counts;
+type_from_table(aggr_channel_stats_process_stats) ->
+    process_stats;
+type_from_table(aggr_channel_exchange_stats_deliver_get) ->
+    deliver_get;
+type_from_table(aggr_channel_exchange_stats_fine_stats) ->
+    fine_stats;
+type_from_table(aggr_exchange_stats_fine_stats) ->
+    fine_stats;
+type_from_table(aggr_node_stats_coarse_node_stats) ->
+    coarse_node_stats;
+type_from_table(aggr_node_node_stats_coarse_node_node_stats) ->
+    coarse_node_node_stats;
+type_from_table(aggr_node_node_stats_coarse_conn_stats) -> % NOTE(review): this table is never created (no aggr_table/2, index/1 or key_index/1 clause) -- probable typo/dead clause; verify against upstream
+    coarse_conn_stats;
+type_from_table(aggr_connection_stats_coarse_conn_stats) ->
+    coarse_conn_stats;
+type_from_table(aggr_connection_stats_process_stats) ->
+    process_stats.
+
+index(aggr_queue_stats_deliver_get) -> % index/1: name of the auxiliary index table paired with each aggregated table
+    aggr_queue_stats_deliver_get_index;
+index(aggr_queue_stats_fine_stats) ->
+    aggr_queue_stats_fine_stats_index;
+index(aggr_queue_stats_queue_msg_counts) ->
+    aggr_queue_stats_queue_msg_counts_index;
+index(aggr_queue_stats_queue_msg_rates) ->
+    aggr_queue_stats_queue_msg_rates_index;
+index(aggr_queue_stats_process_stats) ->
+    aggr_queue_stats_process_stats_index;
+index(aggr_queue_exchange_stats_fine_stats) ->
+    aggr_queue_exchange_stats_fine_stats_index;
+index(aggr_vhost_stats_deliver_get) ->
+    aggr_vhost_stats_deliver_get_index;
+index(aggr_vhost_stats_fine_stats) ->
+    aggr_vhost_stats_fine_stats_index;
+index(aggr_vhost_stats_queue_msg_rates) ->
+    aggr_vhost_stats_queue_msg_rates_index;
+index(aggr_vhost_stats_queue_msg_counts) ->
+    aggr_vhost_stats_queue_msg_counts_index;
+index(aggr_vhost_stats_coarse_conn_stats) ->
+    aggr_vhost_stats_coarse_conn_stats_index;
+index(aggr_channel_queue_stats_deliver_get) ->
+    aggr_channel_queue_stats_deliver_get_index;
+index(aggr_channel_queue_stats_fine_stats) ->
+    aggr_channel_queue_stats_fine_stats_index;
+index(aggr_channel_queue_stats_queue_msg_counts) ->
+    aggr_channel_queue_stats_queue_msg_counts_index;
+index(aggr_channel_stats_deliver_get) ->
+    aggr_channel_stats_deliver_get_index;
+index(aggr_channel_stats_fine_stats) ->
+    aggr_channel_stats_fine_stats_index;
+index(aggr_channel_stats_queue_msg_counts) ->
+    aggr_channel_stats_queue_msg_counts_index;
+index(aggr_channel_stats_process_stats) ->
+    aggr_channel_stats_process_stats_index;
+index(aggr_channel_exchange_stats_deliver_get) ->
+    aggr_channel_exchange_stats_deliver_get_index;
+index(aggr_channel_exchange_stats_fine_stats) ->
+    aggr_channel_exchange_stats_fine_stats_index;
+index(aggr_exchange_stats_fine_stats) ->
+    aggr_exchange_stats_fine_stats_index;
+index(aggr_node_stats_coarse_node_stats) ->
+    aggr_node_stats_coarse_node_stats_index;
+index(aggr_node_node_stats_coarse_node_node_stats) ->
+    aggr_node_node_stats_coarse_node_node_stats_index;
+index(aggr_connection_stats_coarse_conn_stats) ->
+    aggr_connection_stats_coarse_conn_stats_index;
+index(aggr_connection_stats_process_stats) ->
+    aggr_connection_stats_process_stats_index.
+
+key_index(connection_stats) -> % key_index/1: per-table key-index name; unlike index/1 this also covers the raw (non-aggregated) connection/channel stats tables
+    connection_stats_key_index;
+key_index(channel_stats) ->
+    channel_stats_key_index;
+key_index(aggr_queue_stats_deliver_get) ->
+    aggr_queue_stats_deliver_get_key_index;
+key_index(aggr_queue_stats_fine_stats) ->
+    aggr_queue_stats_fine_stats_key_index;
+key_index(aggr_queue_stats_queue_msg_counts) ->
+    aggr_queue_stats_queue_msg_counts_key_index;
+key_index(aggr_queue_stats_queue_msg_rates) ->
+    aggr_queue_stats_queue_msg_rates_key_index;
+key_index(aggr_queue_stats_process_stats) ->
+    aggr_queue_stats_process_stats_key_index;
+key_index(aggr_queue_exchange_stats_fine_stats) ->
+    aggr_queue_exchange_stats_fine_stats_key_index;
+key_index(aggr_vhost_stats_deliver_get) ->
+    aggr_vhost_stats_deliver_get_key_index;
+key_index(aggr_vhost_stats_fine_stats) ->
+    aggr_vhost_stats_fine_stats_key_index;
+key_index(aggr_vhost_stats_queue_msg_rates) ->
+    aggr_vhost_stats_queue_msg_rates_key_index;
+key_index(aggr_vhost_stats_queue_msg_counts) ->
+    aggr_vhost_stats_queue_msg_counts_key_index;
+key_index(aggr_vhost_stats_coarse_conn_stats) ->
+    aggr_vhost_stats_coarse_conn_stats_key_index;
+key_index(aggr_channel_queue_stats_deliver_get) ->
+    aggr_channel_queue_stats_deliver_get_key_index;
+key_index(aggr_channel_queue_stats_fine_stats) ->
+    aggr_channel_queue_stats_fine_stats_key_index;
+key_index(aggr_channel_queue_stats_queue_msg_counts) ->
+    aggr_channel_queue_stats_queue_msg_counts_key_index;
+key_index(aggr_channel_stats_deliver_get) ->
+    aggr_channel_stats_deliver_get_key_index;
+key_index(aggr_channel_stats_fine_stats) ->
+    aggr_channel_stats_fine_stats_key_index;
+key_index(aggr_channel_stats_queue_msg_counts) ->
+    aggr_channel_stats_queue_msg_counts_key_index;
+key_index(aggr_channel_stats_process_stats) ->
+    aggr_channel_stats_process_stats_key_index;
+key_index(aggr_channel_exchange_stats_deliver_get) ->
+    aggr_channel_exchange_stats_deliver_get_key_index;
+key_index(aggr_channel_exchange_stats_fine_stats) ->
+    aggr_channel_exchange_stats_fine_stats_key_index;
+key_index(aggr_exchange_stats_fine_stats) ->
+    aggr_exchange_stats_fine_stats_key_index;
+key_index(aggr_node_stats_coarse_node_stats) ->
+    aggr_node_stats_coarse_node_stats_key_index;
+key_index(aggr_node_node_stats_coarse_node_node_stats) ->
+    aggr_node_node_stats_coarse_node_node_stats_key_index;
+key_index(aggr_connection_stats_coarse_conn_stats) ->
+    aggr_connection_stats_coarse_conn_stats_key_index;
+key_index(aggr_connection_stats_process_stats) ->
+    aggr_connection_stats_process_stats_key_index.
index 992ff722629b0dfc7c0f1152844efa02fe42267d..326b65e44251c556da81d1fc8b4ef6e5903582c3 100644 (file)
 -export([init/1]).
 -export([start_link/0]).
 
+-include("rabbit_mgmt_metrics.hrl").
 -include_lib("rabbit_common/include/rabbit.hrl").
 
 init([]) ->
+    COLLECTOR = {rabbit_mgmt_event_collector,                          % main stats event collector worker
+                 {rabbit_mgmt_event_collector, start_link, []},
+                 permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_event_collector]},
+    CCOLLECTOR = {rabbit_mgmt_channel_stats_collector,                 % dedicated channel-stats collector
+                  {rabbit_mgmt_channel_stats_collector, start_link, []},
+                  permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_channel_stats_collector]},
+    QCOLLECTOR = {rabbit_mgmt_queue_stats_collector,                   % dedicated queue-stats collector
+                  {rabbit_mgmt_queue_stats_collector, start_link, []},
+                  permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_queue_stats_collector]},
+    GC = [{rabbit_mgmt_stats_gc:name(Table), {rabbit_mgmt_stats_gc, start_link, [Table]}, % one GC worker per aggregated table
+           permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_stats_gc]}
+          || Table <- ?AGGR_TABLES],
+    ProcGC = [{rabbit_mgmt_stats_gc:name(Table), {rabbit_mgmt_stats_gc, start_link, [Table]}, % ...and per process-stats table
+           permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_stats_gc]} % NOTE(review): child ids come from name/1 -- assumes ?AGGR_TABLES and ?PROC_STATS_TABLES are disjoint; verify
+          || Table <- ?PROC_STATS_TABLES],
     DB = {rabbit_mgmt_db, {rabbit_mgmt_db, start_link, []},
-          permanent, ?MAX_WAIT, worker, [rabbit_mgmt_db]},
-    {ok, {{one_for_one, 10, 10}, [DB]}}.
+          permanent, ?WORKER_WAIT, worker, [rabbit_mgmt_db]},
+    {ok, {{one_for_one, 10, 10}, [COLLECTOR, CCOLLECTOR, QCOLLECTOR, DB] ++ GC ++ ProcGC}}.
 
 start_link() ->
      mirrored_supervisor:start_link(
index db13c1e36e7fd4190c901b9041d9f8fde9f6f4ce..64f4674efe86a62814539d4823c0cc3d7d552e33 100644 (file)
@@ -68,4 +68,4 @@ init([]) ->
 
 sup() ->
     {rabbit_mgmt_sup, {rabbit_mgmt_sup, start_link, []},
-     temporary, ?MAX_WAIT, supervisor, [rabbit_mgmt_sup]}.
+     temporary, ?SUPERVISOR_WAIT, supervisor, [rabbit_mgmt_sup]}. % supervisor children now get the dedicated ?SUPERVISOR_WAIT shutdown timeout instead of the generic ?MAX_WAIT
index d8f0d052730d9e1773a704473188183fff5b2ffe..939cf674b816a833501211bc71d9dac7b26766c0 100644 (file)
@@ -127,7 +127,11 @@ is_authorized(ReqData, Context, Username, Password, ErrorMsg, Fun) ->
                                         [Username, Msg]),
                      not_authorised(Msg, ReqData, Context)
              end,
-    case rabbit_access_control:check_user_pass_login(Username, Password) of
+    AuthProps = [{password, Password}] ++ case vhost(ReqData) of
+        VHost when is_binary(VHost) -> [{vhost, VHost}];
+        _                           -> []
+    end,
+    case rabbit_access_control:check_user_login(Username, AuthProps) of
         {ok, User = #user{tags = Tags}} ->
             IP = peer(ReqData),
             case rabbit_access_control:check_user_loopback(Username, IP) of
@@ -388,6 +392,8 @@ get_dotted_value0([Key], Item) ->
 get_dotted_value0([Key | Keys], Item) ->
     get_dotted_value0(Keys, pget_bin(list_to_binary(Key), Item, [])).
 
+pget_bin(Key, {struct, List}, Default) ->
+    pget_bin(Key, List, Default);
 pget_bin(Key, List, Default) ->
     case lists:partition(fun ({K, _V}) -> a2b(K) =:= Key end, List) of
         {[{_K, V}], _} -> V;
@@ -537,12 +543,12 @@ http_to_amqp(MethodName, ReqData, Context, Transformers, Extra) ->
 props_to_method(MethodName, Props, Transformers, Extra) ->
     Props1 = [{list_to_atom(binary_to_list(K)), V} || {K, V} <- Props],
     props_to_method(
-      MethodName, rabbit_mgmt_format:format(Props1 ++ Extra, Transformers)).
+      MethodName, rabbit_mgmt_format:format(Props1 ++ Extra, {Transformers, true})). % NOTE(review): format/2 now takes {Transformers, Flag}; presumably Flag selects the new formatting mode -- confirm against rabbit_mgmt_format:format/2
 
 props_to_method(MethodName, Props) ->
     Props1 = rabbit_mgmt_format:format(
                Props,
-               [{fun (Args) -> [{arguments, args(Args)}] end, [arguments]}]),
+               {fun rabbit_mgmt_format:format_args/1, true}),
     FieldNames = ?FRAMING:method_fieldnames(MethodName),
     {Res, _Idx} = lists:foldl(
                     fun (K, {R, Idx}) ->
index 17b5ecb39666d37f2966fa6846cd1d6c4efef13d..c019ea4d8927dff5de7423464bd43ac9deafd4a9 100644 (file)
@@ -17,6 +17,8 @@
 -module(rabbit_mgmt_wm_aliveness_test).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 -export([resource_exists/2]).
 
 -include("rabbit_mgmt.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.  % attach CORS headers to every response from this resource
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.  % OPTIONS is needed so CORS preflight requests are accepted
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},                        % uncompressed passthrough
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.  % optional gzip compression of the response body
+
 resource_exists(ReqData, Context) ->
     {case rabbit_mgmt_util:vhost(ReqData) of
          not_found -> false;
index f03ac3d4063a7816c10c4e47a3b5f2800a951009..5c533ebb5c3b6798a960722ab8aa840bc5fa06c8 100644 (file)
@@ -20,6 +20,8 @@
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, delete_resource/2,
          args_hash/1]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.
 
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'DELETE'], ReqData, Context}.
+    {['HEAD', 'GET', 'DELETE', 'OPTIONS'], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     Binding = binding(ReqData),
index c61c25236d79f1f79802967b24a15fda29d08bd2..887291a50d40e1d978f8d3a7364b5b70aaeec417 100644 (file)
@@ -20,6 +20,8 @@
 -export([allowed_methods/2, post_is_create/2, create_path/2]).
 -export([content_types_accepted/2, accept_content/2, resource_exists/2]).
 -export([basic/1, augmented/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 init([Mode]) ->
     {ok, {Mode, #context{}}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, {Mode, Context}) ->
     {case list_bindings(Mode, ReqData) of
          vhost_not_found -> false;
@@ -44,8 +53,8 @@ content_types_accepted(ReqData, Context) ->
 
 allowed_methods(ReqData, {Mode, Context}) ->
     {case Mode of
-         source_destination -> ['HEAD', 'GET', 'POST'];
-         _                  -> ['HEAD', 'GET']
+         source_destination -> ['HEAD', 'GET', 'POST', 'OPTIONS'];
+         _                  -> ['HEAD', 'GET', 'OPTIONS']
      end, ReqData, {Mode, Context}}.
 
 post_is_create(ReqData, Context) ->
index c9297c33b368401c27a384484b35025c319588ee..e80769a10477e549346c46bc0772be3ce81b66fe 100644 (file)
@@ -17,6 +17,8 @@
 -module(rabbit_mgmt_wm_channel).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 -export([resource_exists/2]).
 
 -include("rabbit_mgmt.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     case channel(ReqData) of
         not_found -> {false, ReqData, Context};
index 13ebc5a34fb1050e5ddc4d710980fcff6b662b10..15b33a6c32b65d132ba8440f282d6fa5616703c9 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
          augmented/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -import(rabbit_misc, [pget/2]).
 
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 to_json(ReqData, Context) ->
     try
         rabbit_mgmt_util:reply_list_or_paginate(augmented(ReqData, Context),
index 34fc13dfb3bc78b8243045a17b02766f4620ebc4..357c2a6ccc591e0d404117aa11ee3d39e1ad56e2 100644 (file)
@@ -20,6 +20,8 @@
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
          augmented/2, resource_exists/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -import(rabbit_misc, [pget/2]).
 
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {rabbit_vhost:exists(rabbit_mgmt_util:id(vhost, ReqData)), ReqData, Context}.
 
index 1bbe8adeaf72176f10bc9b02a8f5a76c4eb119af..28ac785e08d09cc17c0cbfb92a80a63377e89299 100644 (file)
@@ -19,6 +19,8 @@
 -export([init/1, resource_exists/2, to_json/2,
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, accept_content/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.
 
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'PUT'], ReqData, Context}.
+    {['HEAD', 'GET', 'PUT', 'OPTIONS'], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {true, ReqData, Context}.
@@ -46,7 +55,8 @@ to_json(ReqData, Context) ->
 accept_content(ReqData, Context) ->
     rabbit_mgmt_util:with_decode(
       [name], ReqData, Context, fun([Name], _) ->
-                                        rabbit_nodes:set_cluster_name(Name),
+                                        rabbit_nodes:set_cluster_name(
+                                          as_binary(Name)), % coerce first: the decoded name may be a list -- presumably set_cluster_name/1 requires a binary; verify
                                         {true, ReqData, Context}
                                 end).
 
@@ -55,3 +65,8 @@ is_authorized(ReqData, Context) ->
         'PUT' -> rabbit_mgmt_util:is_authorized_admin(ReqData, Context);
         _     -> rabbit_mgmt_util:is_authorized(ReqData, Context)
     end.
+
+as_binary(Val) when is_binary(Val) ->   % normalise a decoded value to a binary:
+    Val;                                % binaries pass through unchanged,
+as_binary(Val) when is_list(Val) ->
+    list_to_binary(Val).                % char lists are converted
index c6c82a30ff5b37dc9b26384fead48d5a273abf57..dae78e848ee82a7744464fe63885c9551839e893 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, resource_exists/2, to_json/2, content_types_provided/2,
          is_authorized/2, allowed_methods/2, delete_resource/2, conn/1]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'DELETE'], ReqData, Context}.
+    {['HEAD', 'GET', 'DELETE', 'OPTIONS'], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     case conn(ReqData) of
index 7f884ce805f4e362637fd552ac6e23be048a25f6..51b0b20a11ed672dec9c2a4f1a7d1c3f7f158bfd 100644 (file)
@@ -17,6 +17,8 @@
 -module(rabbit_mgmt_wm_connection_channels).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 -export([resource_exists/2]).
 
 -include("rabbit_mgmt.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     case rabbit_mgmt_wm_connection:conn(ReqData) of
         error -> {false, ReqData, Context};
index 00d59db16e8624e02eac6305377cecace5fc60ac..b118b6f49b8a850060efc7555e5d9d86d68bdeaa 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
          augmented/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -import(rabbit_misc, [pget/2]).
 
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 to_json(ReqData, Context) ->
     try
         rabbit_mgmt_util:reply_list_or_paginate(augmented(ReqData, Context),
index 8cc34e6f5d76b52f1b7bc28536ca58535192cb6b..d158f4fb7ddad0e6875ce473770673e4b54bac1e 100644 (file)
@@ -20,6 +20,8 @@
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
          augmented/2, resource_exists/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -import(rabbit_misc, [pget/2]).
 
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {rabbit_vhost:exists(rabbit_mgmt_util:id(vhost, ReqData)), ReqData, Context}.
 
index 3ec0f28da42c5a070211e9e8c3635a79b9231f42..c36d0ac6e2e7171af66430d2dbd6a94dd9d95d2a 100644 (file)
@@ -17,6 +17,8 @@
 
 -export([init/1, to_json/2, content_types_provided/2, resource_exists/2,
          is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -import(rabbit_misc, [pget/2]).
 
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.  % was `Context`: set_headers/2 expects the resource module (used for CORS preflight), matching every other wm_* handler in this change
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {case rabbit_mgmt_util:vhost(ReqData) of
          vhost_not_found -> false;
index 7427f0f5b261a37a47b71dd876fc659b58f7fc15..6d02f3e102d67af7b8521a98ce776558ac093a71 100644 (file)
@@ -19,6 +19,8 @@
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
 -export([content_types_accepted/2, allowed_methods/2, accept_json/2]).
 -export([post_is_create/2, create_path/2, accept_multipart/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -export([apply_defs/3]).
 
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.  % was `Context`: set_headers/2 expects the resource module (used for CORS preflight), matching every other wm_* handler in this change
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_json},
      {"multipart/form-data", accept_multipart}], ReqData, Context}.
 
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'POST'], ReqData, Context}.
+    {['HEAD', 'GET', 'POST', 'OPTIONS'], ReqData, Context}.
 
 post_is_create(ReqData, Context) ->
     {true, ReqData, Context}.
index af838bf2e0c6f8c616ecc18c7a23b747a55d0167..7f7ba767ab01203d5c12c06573f7a8d0e3e1654b 100644 (file)
@@ -20,6 +20,8 @@
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, accept_content/2,
          delete_resource/2, exchange/1, exchange/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.  % was `Context`: set_headers/2 expects the resource module (used for CORS preflight), matching every other wm_* handler in this change
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.
 
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+    {['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS'], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {case exchange(ReqData) of
@@ -56,7 +65,7 @@ to_json(ReqData, Context) ->
 accept_content(ReqData, Context) ->
     rabbit_mgmt_util:http_to_amqp(
       'exchange.declare', ReqData, Context,
-      [{fun rabbit_mgmt_util:parse_bool/1, [durable, auto_delete, internal]}],
+      fun rabbit_mgmt_format:format_accept_content/1,
       [{exchange, rabbit_mgmt_util:id(exchange, ReqData)}]).
 
 delete_resource(ReqData, Context) ->
index 467c055be76f3274f96893030083c810f3321c81..ff2dbb1d11fb4d00e7ee9e4799ac1e97a02e39f7 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, resource_exists/2, post_is_create/2, is_authorized/2,
          allowed_methods/2,  content_types_provided/2, process_post/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
 allowed_methods(ReqData, Context) ->
-    {['POST'], ReqData, Context}.
+    {['POST', 'OPTIONS'], ReqData, Context}.
 
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {case rabbit_mgmt_wm_exchange:exchange(ReqData) of
          not_found -> false;
index 701cb1067067a210aea7c4ef35dc39fd8da5c2a0..529acbc13c30dd693b688297fd03d2bf121016ca 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
          resource_exists/2, basic/1, augmented/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {case exchanges0(ReqData) of
          vhost_not_found -> false;
index b38bf1d795a436a103423c6e4f83500030aca167..5de1b1674791158c7ea245067e647390db1de4d5 100644 (file)
@@ -17,6 +17,8 @@
 -module(rabbit_mgmt_wm_extensions).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 to_json(ReqData, Context) ->
     Modules = rabbit_mgmt_dispatcher:modules([]),
     rabbit_mgmt_util:reply(
diff --git a/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl b/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_healthchecks.erl
new file mode 100644 (file)
index 0000000..eb71a3f
--- /dev/null
@@ -0,0 +1,79 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+-module(rabbit_mgmt_wm_healthchecks).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
+-export([resource_exists/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
+content_types_provided(ReqData, Context) ->
+   {[{"application/json", to_json}], ReqData, Context}.
+
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+    {case node0(ReqData) of
+         not_found -> false;
+         _         -> true
+     end, ReqData, Context}.
+
+to_json(ReqData, Context) ->
+    Node = node0(ReqData),
+    try
+        Timeout = case wrq:get_req_header("timeout", ReqData) of
+                      undefined -> 70000;
+                      Val       -> list_to_integer(Val)
+                  end,
+        rabbit_health_check:node(Node, Timeout),
+        rabbit_mgmt_util:reply([{status, ok}], ReqData, Context)
+    catch
+        {node_is_ko, ErrorMsg, _ErrorCode} ->
+            rabbit_mgmt_util:reply([{status, failed},
+                                    {reason, rabbit_mgmt_format:print(ErrorMsg)}],
+                                   ReqData, Context)
+    end.
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized(ReqData, Context).
+
+node0(ReqData) ->
+    Node = case rabbit_mgmt_util:id(node, ReqData) of
+               none ->
+                   node();
+               Node0 ->
+                   list_to_atom(binary_to_list(Node0))
+           end,
+    case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData),
+               proplists:get_value(name, N) == Node] of
+        []     -> not_found;
+        [_] -> Node
+    end.
index 0d0c2e12464e1ce721a3c5238f9a298ee22af64a..1bc41f3db152ec12e04eaa93fbc5f8e1718572e1 100644 (file)
@@ -17,6 +17,8 @@
 -module(rabbit_mgmt_wm_node).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 -export([resource_exists/2]).
 
 -include("rabbit_mgmt.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {case node0(ReqData) of
          not_found -> false;
diff --git a/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl b/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory.erl
new file mode 100644 (file)
index 0000000..37ed784
--- /dev/null
@@ -0,0 +1,92 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ Management Console.
+%%
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_node_memory).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
+-export([resource_exists/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init([Mode]) -> {ok, {Mode, #context{}}}.
+
+finish_request(ReqData, {Mode, Context}) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), {Mode, Context}}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
+content_types_provided(ReqData, Context) ->
+   {[{"application/json", to_json}], ReqData, Context}.
+
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+    {node_exists(ReqData, get_node(ReqData)), ReqData, Context}.
+
+to_json(ReqData, {Mode, Context}) ->
+    rabbit_mgmt_util:reply(augment(Mode, ReqData), ReqData, {Mode, Context}).
+
+is_authorized(ReqData, {Mode, Context}) ->
+    {Res, RD, C} = rabbit_mgmt_util:is_authorized_monitor(ReqData, Context),
+    {Res, RD, {Mode, C}}.
+
+%%--------------------------------------------------------------------
+get_node(ReqData) ->
+    list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))).
+
+node_exists(ReqData, Node) ->
+    case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData),
+               proplists:get_value(name, N) == Node] of
+        [] -> false;
+        [_] -> true
+    end.
+
+augment(Mode, ReqData) ->
+    Node = get_node(ReqData),
+    case node_exists(ReqData, Node) of
+        false ->
+            not_found;
+        true ->
+            case rpc:call(Node, rabbit_vm, memory, [], infinity) of
+                {badrpc, _} -> [{memory, not_available}];
+                Result      -> [{memory, format(Mode, Result)}]
+            end
+    end.
+
+format(absolute, Result) ->
+    Result;
+format(relative, Result) ->
+    {[{total, Total}], Rest} = lists:splitwith(fun({Key, _}) ->
+                                                       Key == total
+                                               end, Result),
+    [{total, 100} | [{K, percentage(V, Total)} || {K, V} <- Rest]].
+
+percentage(Part, Total) ->
+    case round((Part/Total) * 100) of
+        0 when Part =/= 0 ->
+            1;
+        Int ->
+            Int
+    end.
diff --git a/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl b/rabbitmq-server/deps/rabbitmq_management/src/rabbit_mgmt_wm_node_memory_ets.erl
new file mode 100644 (file)
index 0000000..e7ce6dd
--- /dev/null
@@ -0,0 +1,102 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ Management Console.
+%%
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_wm_node_memory_ets).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
+-export([resource_exists/2]).
+
+-include("rabbit_mgmt.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+%%--------------------------------------------------------------------
+
+init([Mode]) -> {ok, {Mode, #context{}}}.
+
+finish_request(ReqData, {Mode, Context}) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), {Mode, Context}}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
+content_types_provided(ReqData, Context) ->
+   {[{"application/json", to_json}], ReqData, Context}.
+
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
+resource_exists(ReqData, Context) ->
+    {node_exists(ReqData, get_node(ReqData)), ReqData, Context}.
+
+to_json(ReqData, {Mode, Context}) ->
+    rabbit_mgmt_util:reply(augment(Mode, ReqData), ReqData, {Mode, Context}).
+
+is_authorized(ReqData, {Mode, Context}) ->
+    {Res, RD, C} = rabbit_mgmt_util:is_authorized_monitor(ReqData, Context),
+    {Res, RD, {Mode, C}}.
+
+%%--------------------------------------------------------------------
+get_node(ReqData) ->
+    list_to_atom(binary_to_list(rabbit_mgmt_util:id(node, ReqData))).
+
+get_filter(ReqData) ->
+    case rabbit_mgmt_util:id(filter, ReqData) of
+        none                        -> all;
+        <<"management">>            -> rabbit_mgmt_event_collector;
+        Other when is_binary(Other) -> list_to_atom(binary_to_list(Other));
+        _                           -> all
+    end.
+
+node_exists(ReqData, Node) ->
+    case [N || N <- rabbit_mgmt_wm_nodes:all_nodes(ReqData),
+               proplists:get_value(name, N) == Node] of
+        [] -> false;
+        [_] -> true
+    end.
+
+augment(Mode, ReqData) ->
+    Node = get_node(ReqData),
+    Filter = get_filter(ReqData),
+    case node_exists(ReqData, Node) of
+        false ->
+            not_found;
+        true ->
+            case rpc:call(Node, rabbit_vm, ets_tables_memory,
+                          [Filter], infinity) of
+                {badrpc, _} -> [{ets_tables_memory, not_available}];
+                []          -> [{ets_tables_memory, no_tables}];
+                Result      -> [{ets_tables_memory, format(Mode, Result)}]
+            end
+    end.
+
+format(absolute, Result) ->
+    Total = lists:sum([V || {_K,V} <- Result]),
+    [{total, Total} | Result];
+format(relative, Result) ->
+    Total = lists:sum([V || {_K,V} <- Result]),
+    [{total, 100} | [{K, percentage(V, Total)} || {K, V} <- Result]].
+
+percentage(Part, Total) ->
+    case round((Part/Total) * 100) of
+        0 when Part =/= 0 ->
+            1;
+        Int ->
+            Int
+    end.
index ccfa661ec9f1211a88c9f32d345b660535b54160..e1cbf089af0e0de10d348852dd2f9c42db997697 100644 (file)
@@ -17,7 +17,9 @@
 -module(rabbit_mgmt_wm_nodes).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
 -export([all_nodes/1, all_nodes_raw/0]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 to_json(ReqData, Context) ->
     try
         rabbit_mgmt_util:reply_list(all_nodes(ReqData), ReqData, Context)
index 9edafa1ddc1fc5a619cf0e2bef23832e30499f49..fe36948b2230fac94a25dbb2114d51438ccfc765 100644 (file)
@@ -17,6 +17,8 @@
 -module(rabbit_mgmt_wm_overview).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -import(rabbit_misc, [pget/2, pget/3]).
 
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, ?MODULE), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 to_json(ReqData, Context = #context{user = User = #user{tags = Tags}}) ->
     {ok, RatesMode} = application:get_env(rabbitmq_management, rates_mode),
     %% NB: this duplicates what's in /nodes but we want a global idea
index bc02905e9b0a503fe9a0d0f4e8165782fe558296..a72693a57d1ff2822b0821ec5d4dab380d02802f 100644 (file)
@@ -20,6 +20,8 @@
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, accept_content/2,
          delete_resource/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -import(rabbit_misc, [pget/2]).
 
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.
 
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+    {['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS'], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {case parameter(ReqData) of
@@ -47,7 +56,8 @@ resource_exists(ReqData, Context) ->
      end, ReqData, Context}.
 
 to_json(ReqData, Context) ->
-    rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter(parameter(ReqData)),
+    rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter(
+        rabbit_mgmt_wm_parameters:fix_shovel_publish_properties(parameter(ReqData))),
                            ReqData, Context).
 
 accept_content(ReqData, Context = #context{user = User}) ->
index 80ffc1a063c053a54b06d2555aa73ac348394a79..696879d3efa2f553b19f579b10106e8c6c2f9691 100644 (file)
@@ -18,6 +18,9 @@
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
          resource_exists/2, basic/1]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
+-export([fix_shovel_publish_properties/1]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {case basic(ReqData) of
          not_found -> false;
@@ -59,5 +72,24 @@ basic(ReqData) ->
           end,
     case Raw of
         not_found -> not_found;
-        _         -> [rabbit_mgmt_format:parameter(P) || P <- Raw]
+        _         -> [rabbit_mgmt_format:parameter(fix_shovel_publish_properties(P)) || P <- Raw]
+    end.
+
+%% Hackish fix to make sure we return a JSON object instead of an empty list
+%% when the publish-properties value is empty. Should be removed in 3.7.0
+%% when we switch to a new JSON library.
+fix_shovel_publish_properties(P) ->
+    case lists:keyfind(component, 1, P) of
+        {_, <<"shovel">>} ->
+            case lists:keytake(value, 1, P) of
+                {value, {_, Values}, P2} ->
+                    case lists:keytake(<<"publish-properties">>, 1, Values) of
+                        {_, {_, []}, Values2} ->
+                            P2 ++ [{value, Values2 ++ [{<<"publish-properties">>, empty_struct}]}];
+                        _ ->
+                            P
+                    end;
+                _ -> P
+            end;
+        _ -> P
     end.
index a87e58bd773bd35570927461f5c8cf358bb8a1de..21cc29408d7792a976967d7a536e735741f0fb68 100644 (file)
@@ -20,6 +20,8 @@
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, accept_content/2,
          delete_resource/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.
 
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+    {['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS'], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {case perms(ReqData) of
index 4caa4a1ee4f8fa162f08dce83211a0c943c31cc4..46ad475ce202b8682c492dd75f973a8eb1e9c6c0 100644 (file)
@@ -17,7 +17,9 @@
 -module(rabbit_mgmt_wm_permissions).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
 -export([permissions/0]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 to_json(ReqData, Context) ->
     rabbit_mgmt_util:reply_list(permissions(), ["vhost", "user"],
                                 ReqData, Context).
index d785a65211d9e3c2cad0b9a0f238b75c0638f7b0..bcf8d383dea15ee718d808e682de5c679aa815f9 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, to_json/2, content_types_provided/2, resource_exists/2,
          is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {case rabbit_mgmt_wm_user:user(ReqData) of
          {ok, _}    -> true;
index 5dbc8d9b8fbff9b0b0d862be91f41a897ca98188..ba220b8945f8dbe9ebd28bfebbd4077287784127 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, to_json/2, content_types_provided/2, resource_exists/2,
          is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {rabbit_vhost:exists(rabbit_mgmt_wm_vhost:id(ReqData)), ReqData, Context}.
 
index dbc01778377bd1991e99b80abd59e61f6a97d02a..e51074565993e950c9b206072f492fd3904e90ea 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
          resource_exists/2, basic/1]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {case basic(ReqData) of
          not_found -> false;
@@ -42,7 +54,7 @@ to_json(ReqData, Context) ->
       ["priority"], ReqData, Context).
 
 is_authorized(ReqData, Context) ->
-    rabbit_mgmt_util:is_authorized_policies(ReqData, Context).
+    rabbit_mgmt_util:is_authorized_vhost(ReqData, Context).
 
 %%--------------------------------------------------------------------
 
index b2b98b04956f28e7c5fa4f0f76df3f299fcb755e..3460a87a521b737f30aa78667cfc554ab446f14c 100644 (file)
@@ -20,6 +20,8 @@
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, accept_content/2,
          delete_resource/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -import(rabbit_misc, [pget/2]).
 
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.
 
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+    {['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS'], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {case policy(ReqData) of
index 6436c73052fee6236801cb8bbaf59e1e62dd0fa4..d1e87a59d28e0ac48b35a4399fd23968872f9b35 100644 (file)
@@ -20,6 +20,8 @@
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, accept_content/2,
          delete_resource/2, queue/1, queue/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.
 
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+    {['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS'], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {case queue(ReqData) of
@@ -54,9 +63,9 @@ to_json(ReqData, Context) ->
     end.
 
 accept_content(ReqData, Context) ->
-   rabbit_mgmt_util:http_to_amqp(
+    rabbit_mgmt_util:http_to_amqp(
       'queue.declare', ReqData, Context,
-      [{fun rabbit_mgmt_util:parse_bool/1, [durable, auto_delete]}],
+      fun rabbit_mgmt_format:format_accept_content/1,
       [{queue, rabbit_mgmt_util:id(queue, ReqData)}]).
 
 delete_resource(ReqData, Context) ->
index a6017643dccefc7248724ea708ead4010c3f62d0..b75e2dc4899a538373ab14dc973875c29cd94b71 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, resource_exists/2, post_is_create/2, is_authorized/2,
          allowed_methods/2, process_post/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
 allowed_methods(ReqData, Context) ->
-    {['POST'], ReqData, Context}.
+    {['POST', 'OPTIONS'], ReqData, Context}.
+
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {case rabbit_mgmt_wm_queue:queue(ReqData) of
index 3a4b4889fa9fc47bb6949c0b6e48c41a93f98c05..85cd5c8cd696254f6bf3cfb50880f339f5cedcfa 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, resource_exists/2, post_is_create/2, is_authorized/2,
   allowed_methods/2, process_post/2, content_types_provided/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
 allowed_methods(ReqData, Context) ->
-    {['POST'], ReqData, Context}.
+    {['POST', 'OPTIONS'], ReqData, Context}.
 
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {case rabbit_mgmt_wm_queue:queue(ReqData) of
index a275423fb8de2fc25fb0fe93b5ad23e3b716cc6a..b4d94badb2fab0b8cfd2cea0c5861642eeb27115 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, resource_exists/2, is_authorized/2, allowed_methods/2,
          delete_resource/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
 allowed_methods(ReqData, Context) ->
-    {['DELETE'], ReqData, Context}.
+    {['DELETE', 'OPTIONS'], ReqData, Context}.
+
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {case rabbit_mgmt_wm_queue:queue(ReqData) of
index 026e8ad2fcd8699a1d134d6b83e2f00903bca47e..2a67890d8f85d6d12c21223b89e1011a172913ee 100644 (file)
@@ -18,6 +18,8 @@
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2,
          resource_exists/2, basic/1, augmented/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 resource_exists(ReqData, Context) ->
     {case queues0(ReqData) of
          vhost_not_found -> false;
@@ -52,10 +64,9 @@ is_authorized(ReqData, Context) ->
 %%--------------------------------------------------------------------
 
 augmented(ReqData, Context) ->
-    rabbit_mgmt_format:strip_pids(
-      rabbit_mgmt_db:augment_queues(
-        rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context),
-        rabbit_mgmt_util:range_ceil(ReqData), basic)).
+    rabbit_mgmt_db:augment_queues(
+      rabbit_mgmt_util:filter_vhost(basic(ReqData), ReqData, Context),
+      rabbit_mgmt_util:range_ceil(ReqData), basic).
 
 basic(ReqData) ->
     [rabbit_mgmt_format:queue(Q) || Q <- queues0(ReqData)] ++
index a440c50360016c9b42dcb00e67a73b5e4ba71fb1..b62f17a8055033dae7a4bcb0d2323b963f29e611 100644 (file)
@@ -20,6 +20,8 @@
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, accept_content/2,
          delete_resource/2, user/1, put_user/1, put_user/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -import(rabbit_misc, [pget/2]).
 
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.
 
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+    {['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS'], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {case user(ReqData) of
index 745a91f8d232d3b2fb03e9f29c0d2a93fc6b6ea9..c6ba4f531ed060c120c34d37824c7c65214c2e88 100644 (file)
@@ -17,6 +17,8 @@
 -module(rabbit_mgmt_wm_users).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 -export([users/0]).
 
 -import(rabbit_misc, [pget/2]).
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 to_json(ReqData, Context) ->
     rabbit_mgmt_util:reply_list(users(), ReqData, Context).
 
index 7a1fa4f2b0721d33fa5716f43c1ef4aaf873c96f..09c8c526808a7484b792154aececd3c416f50736 100644 (file)
@@ -20,6 +20,8 @@
          content_types_provided/2, content_types_accepted/2,
          is_authorized/2, allowed_methods/2, accept_content/2,
          delete_resource/2, id/1, put_vhost/2]).
+-export([finish_request/2]).
+-export([encodings_provided/2]).
 
 -import(rabbit_misc, [pget/2]).
 
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 content_types_accepted(ReqData, Context) ->
    {[{"application/json", accept_content}], ReqData, Context}.
 
 allowed_methods(ReqData, Context) ->
-    {['HEAD', 'GET', 'PUT', 'DELETE'], ReqData, Context}.
+    {['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS'], ReqData, Context}.
 
 resource_exists(ReqData, Context) ->
     {rabbit_vhost:exists(id(ReqData)), ReqData, Context}.
index ae24131db4b4c87da89183066ad269a03f118de0..faeb0f51d758d545372cd2c7f572056d00791655 100644 (file)
@@ -17,6 +17,8 @@
 -module(rabbit_mgmt_wm_vhosts).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 -export([basic/0, augmented/2]).
 
 -include("rabbit_mgmt.hrl").
 
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 to_json(ReqData, Context) ->
     try
         rabbit_mgmt_util:reply_list_or_paginate(
index 8899c94c970676d6788333536eefcfc5ae30697f..5d262a7fb3099fe75a24aee2d58a1fc9b2411627 100644 (file)
@@ -17,6 +17,8 @@
 -module(rabbit_mgmt_wm_whoami).
 
 -export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+-export([finish_request/2, allowed_methods/2]).
+-export([encodings_provided/2]).
 
 -include("rabbit_mgmt.hrl").
 -include_lib("webmachine/include/webmachine.hrl").
 %%--------------------------------------------------------------------
 init(_Config) -> {ok, #context{}}.
 
+finish_request(ReqData, Context) ->
+    {ok, rabbit_mgmt_cors:set_headers(ReqData, Context), Context}.
+
+allowed_methods(ReqData, Context) ->
+    {['HEAD', 'GET', 'OPTIONS'], ReqData, Context}.
+
 content_types_provided(ReqData, Context) ->
    {[{"application/json", to_json}], ReqData, Context}.
 
+encodings_provided(ReqData, Context) ->
+    {[{"identity", fun(X) -> X end},
+     {"gzip", fun(X) -> zlib:gzip(X) end}], ReqData, Context}.
+
 to_json(ReqData, Context = #context{user = User}) ->
     rabbit_mgmt_util:reply(rabbit_mgmt_format:user(User), ReqData, Context).
 
index 33e27c7f0b69c07e7bbe8db441555dd9a7fbf4d4..bfa1ffc6553010421232c9ef2b582bf81f5d7954 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_management,
  [{description, "RabbitMQ Management Console"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_mgmt_app, []}},
           %% List of {MaxAgeInSeconds, SampleEveryNSeconds}
           [{global,   [{605, 5}, {3660, 60}, {29400, 600}, {86400, 1800}]},
            {basic,    [{605, 5}, {3600, 60}]},
-           {detailed, [{10, 5}]}]}
+           {detailed, [{10, 5}]}]},
+         {process_stats_gc_timeout, 300000},
+         {stats_event_max_backlog, 250},
+         {cors_allow_origins, []},
+         {cors_max_age, 1800}
         ]},
   {applications, [kernel, stdlib, rabbit, xmerl, rabbitmq_web_dispatch,
                   amqp_client, rabbitmq_management_agent]}]}.
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_clustering_SUITE.erl b/rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_clustering_SUITE.erl
new file mode 100644 (file)
index 0000000..8f348a6
--- /dev/null
@@ -0,0 +1,144 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_clustering_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("include/rabbit_mgmt_test.hrl").
+
+-import(rabbit_ct_broker_helpers, [get_node_config/3, restart_node/2]).
+-import(rabbit_mgmt_test_util, [http_get/2, http_put/4, http_delete/3]).
+-import(rabbit_misc, [pget/2]).
+
+-compile(export_all).
+
+all() ->
+    [
+     {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+     {non_parallel_tests, [], [
+                               list_cluster_nodes_test,
+                               multi_node_case1_test
+                              ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    inets:start(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+                                                    {rmq_nodename_suffix, ?MODULE},
+                                                    {rmq_nodes_count, 2}
+                                                   ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+                                      rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+                                         rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+list_cluster_nodes_test(Config) ->
+    %% see rmq_nodes_count in init_per_suite
+    ?assertEqual(2, length(http_get(Config, "/nodes"))),
+    passed.
+
+multi_node_case1_test(Config) ->
+    Nodename1 = get_node_config(Config, 0, nodename),
+    Nodename2 = get_node_config(Config, 1, nodename),
+    Policy = [{pattern,    <<".*">>},
+              {definition, [{'ha-mode', <<"all">>}]}],
+    http_put(Config, "/policies/%2f/HA", Policy, ?NO_CONTENT),
+    QArgs = [{node, list_to_binary(atom_to_list(Nodename2))}],
+    http_put(Config, "/queues/%2f/ha-queue", QArgs, ?NO_CONTENT),
+
+    Q = wait_for(Config, "/queues/%2f/ha-queue"),
+    assert_node(Nodename2, pget(node, Q)),
+    assert_single_node(Nodename1, pget(slave_nodes, Q)),
+    assert_single_node(Nodename1, pget(synchronised_slave_nodes, Q)),
+    %% restart node2
+    restart_node(Config, 1),
+
+    Q2 = wait_for(Config, "/queues/%2f/ha-queue"),
+    assert_node(Nodename1, pget(node, Q2)),
+    assert_single_node(Nodename2, pget(slave_nodes, Q2)),
+    assert_single_node(Nodename2, pget(synchronised_slave_nodes, Q2)),
+    http_delete(Config, "/queues/%2f/ha-queue", ?NO_CONTENT),
+    http_delete(Config, "/policies/%2f/HA", ?NO_CONTENT),
+
+    passed.
+
+%%----------------------------------------------------------------------------
+
+wait_for(Config, Path) ->
+    wait_for(Config, Path, [slave_nodes, synchronised_slave_nodes]).
+
+wait_for(Config, Path, Keys) ->
+    wait_for(Config, Path, Keys, 1000).
+
+wait_for(_Config, Path, Keys, 0) ->
+    exit({timeout, {Path, Keys}});
+
+wait_for(Config, Path, Keys, Count) ->
+    Res = http_get(Config, Path),
+    case present(Keys, Res) of
+        false -> timer:sleep(10),
+                 wait_for(Config, Path, Keys, Count - 1);
+        true  -> Res
+    end.
+
+present(Keys, Res) ->
+    lists:all(fun (Key) ->
+                      X = pget(Key, Res),
+                      X =/= [] andalso X =/= undefined
+              end, Keys).
+
+assert_single_node(Exp, Act) ->
+    ?assertEqual(1, length(Act)),
+    assert_node(Exp, hd(Act)).
+
+assert_nodes(Exp, Act0) ->
+    Act = [extract_node(A) || A <- Act0],
+    ?assertEqual(length(Exp), length(Act)),
+    [?assert(lists:member(E, Act)) || E <- Exp].
+
+assert_node(Exp, Act) ->
+    ?assertEqual(Exp, list_to_atom(binary_to_list(Act))).
+
+extract_node(N) ->
+    list_to_atom(hd(string:tokens(binary_to_list(N), "@"))).
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl b/rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl
new file mode 100644 (file)
index 0000000..6e82fd1
--- /dev/null
@@ -0,0 +1,2046 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_http_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include("include/rabbit_mgmt_test.hrl").
+
+-import(rabbit_ct_client_helpers, [close_connection/1, close_channel/1, open_unmanaged_connection/1]).
+-import(rabbit_mgmt_test_util, [assert_list/2, assert_item/2, test_item/2,
+                                assert_keys/2, assert_no_keys/2,
+                                http_get/2, http_get/3, http_get/5,
+                                http_put/4, http_put/6,
+                                http_post/4, http_post/6,
+                                http_delete/3, http_delete/5,
+                                http_put_raw/4, http_post_accept_json/4,
+                                req/4, auth_header/2,
+                                amqp_port/1]).
+
+-import(rabbit_misc, [pget/2]).
+
+-compile(export_all).
+
+all() ->
+    [
+     {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+     {non_parallel_tests, [], [
+                               overview_test,
+                               auth_test,
+                               cluster_name_test,
+                               nodes_test,
+                               memory_test,
+                               ets_tables_memory_test,
+                               vhosts_test,
+                               vhosts_trace_test,
+                               users_test,
+                               users_legacy_administrator_test,
+                               permissions_validation_test,
+                               permissions_list_test,
+                               permissions_test,
+                               connections_test,
+                               multiple_invalid_connections_test,
+                               exchanges_test,
+                               queues_test,
+                               bindings_test,
+                               bindings_post_test,
+                               bindings_e2e_test,
+                               permissions_administrator_test,
+                               permissions_vhost_test,
+                               permissions_amqp_test,
+                               permissions_connection_channel_consumer_test,
+                               consumers_test,
+                               definitions_test,
+                               definitions_vhost_test,
+                               definitions_password_test,
+                               definitions_remove_things_test,
+                               definitions_server_named_queue_test,
+                               aliveness_test,
+                               healthchecks_test,
+                               arguments_test,
+                               arguments_table_test,
+                               queue_purge_test,
+                               queue_actions_test,
+                               exclusive_consumer_test,
+                               exclusive_queue_test,
+                               connections_channels_pagination_test,
+                               exchanges_pagination_test,
+                               exchanges_pagination_permissions_test,
+                               queue_pagination_test,
+                               queues_pagination_permissions_test,
+                               samples_range_test,
+                               sorting_test,
+                               format_output_test,
+                               columns_test,
+                               get_test,
+                               get_fail_test,
+                               publish_test,
+                               publish_accept_json_test,
+                               publish_fail_test,
+                               publish_base64_test,
+                               publish_unrouted_test,
+                               if_empty_unused_test,
+                               parameters_test,
+                               policy_test,
+                               policy_permissions_test,
+                               issue67_test,
+                               extensions_test,
+                               cors_test
+                              ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    inets:start(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+                                                    {rmq_nodename_suffix, ?MODULE}
+                                                   ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+                                      rabbit_ct_broker_helpers:setup_steps() ++
+                                          rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+                                         rabbit_ct_client_helpers:teardown_steps() ++
+                                             rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+
+overview_test(Config) ->
+    %% Rather crude, but this req doesn't say much and at least this means it
+    %% didn't blow up.
+    true = 0 < length(pget(listeners, http_get(Config, "/overview"))),
+    http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+                                       {tags,     <<"management">>}], [?CREATED, ?NO_CONTENT]),
+    http_get(Config, "/overview", "myuser", "myuser", ?OK),
+    http_delete(Config, "/users/myuser", ?NO_CONTENT),
+
+    passed.
+
+cluster_name_test(Config) ->
+    http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+                                       {tags,     <<"management">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/cluster-name", [{name, "foo"}], "myuser", "myuser", ?NOT_AUTHORISED),
+    http_put(Config, "/cluster-name", [{name, "foo"}], ?NO_CONTENT),
+    [{name, <<"foo">>}] = http_get(Config, "/cluster-name", "myuser", "myuser", ?OK),
+    http_delete(Config, "/users/myuser", ?NO_CONTENT),
+    passed.
+
+nodes_test(Config) ->
+    http_put(Config, "/users/user", [{password, <<"user">>},
+                                     {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/users/monitor", [{password, <<"monitor">>},
+                                        {tags, <<"monitoring">>}], [?CREATED, ?NO_CONTENT]),
+    DiscNode = [{type, <<"disc">>}, {running, true}],
+    assert_list([DiscNode], http_get(Config, "/nodes")),
+    assert_list([DiscNode], http_get(Config, "/nodes", "monitor", "monitor", ?OK)),
+    http_get(Config, "/nodes", "user", "user", ?NOT_AUTHORISED),
+    [Node] = http_get(Config, "/nodes"),
+    Path = "/nodes/" ++ binary_to_list(pget(name, Node)),
+    assert_item(DiscNode, http_get(Config, Path, ?OK)),
+    assert_item(DiscNode, http_get(Config, Path, "monitor", "monitor", ?OK)),
+    http_get(Config, Path, "user", "user", ?NOT_AUTHORISED),
+    http_delete(Config, "/users/user", ?NO_CONTENT),
+    http_delete(Config, "/users/monitor", ?NO_CONTENT),
+    passed.
+
+memory_test(Config) ->
+    [Node] = http_get(Config, "/nodes"),
+    Path = "/nodes/" ++ binary_to_list(pget(name, Node)) ++ "/memory",
+    Result = http_get(Config, Path, ?OK),
+    assert_keys([memory], Result),
+    Keys = [total, connection_readers, connection_writers, connection_channels,
+            connection_other, queue_procs, queue_slave_procs, plugins,
+            other_proc, mnesia, mgmt_db, msg_index, other_ets, binary, code,
+            atom, other_system],
+    assert_keys(Keys, pget(memory, Result)),
+    http_get(Config, "/nodes/nonode/memory", ?NOT_FOUND),
+    %% Relative memory as a percentage of the total
+    Result1 = http_get(Config, Path ++ "/relative", ?OK),
+    assert_keys([memory], Result1),
+    Breakdown = pget(memory, Result1),
+    assert_keys(Keys, Breakdown),
+    assert_item([{total, 100}], Breakdown),
+    assert_percentage(Breakdown),
+    http_get(Config, "/nodes/nonode/memory/relative", ?NOT_FOUND),
+    passed.
+
+ets_tables_memory_test(Config) ->
+    [Node] = http_get(Config, "/nodes"),
+    Path = "/nodes/" ++ binary_to_list(pget(name, Node)) ++ "/memory/ets",
+    Result = http_get(Config, Path, ?OK),
+    assert_keys([ets_tables_memory], Result),
+    NonMgmtKeys = [rabbit_vhost,rabbit_user_permission],
+    Keys = [total, old_stats_fine_index,
+            connection_stats_key_index, channel_stats_key_index,
+            old_stats, node_node_stats, node_stats, consumers_by_channel,
+            consumers_by_queue, channel_stats, connection_stats, queue_stats],
+    assert_keys(Keys ++ NonMgmtKeys, pget(ets_tables_memory, Result)),
+    http_get(Config, "/nodes/nonode/memory/ets", ?NOT_FOUND),
+    %% Relative memory as a percentage of the total
+    ResultRelative = http_get(Config, Path ++ "/relative", ?OK),
+    assert_keys([ets_tables_memory], ResultRelative),
+    Breakdown = pget(ets_tables_memory, ResultRelative),
+    assert_keys(Keys, Breakdown),
+    assert_item([{total, 100}], Breakdown),
+    assert_percentage(Breakdown),
+    http_get(Config, "/nodes/nonode/memory/ets/relative", ?NOT_FOUND),
+
+    ResultMgmt = http_get(Config, Path ++ "/management", ?OK),
+    assert_keys([ets_tables_memory], ResultMgmt),
+    assert_keys(Keys, pget(ets_tables_memory, ResultMgmt)),
+    assert_no_keys(NonMgmtKeys, pget(ets_tables_memory, ResultMgmt)),
+
+    ResultMgmtRelative = http_get(Config, Path ++ "/management/relative", ?OK),
+    assert_keys([ets_tables_memory], ResultMgmtRelative),
+    assert_keys(Keys, pget(ets_tables_memory, ResultMgmtRelative)),
+    assert_no_keys(NonMgmtKeys, pget(ets_tables_memory, ResultMgmtRelative)),
+    assert_item([{total, 100}], pget(ets_tables_memory, ResultMgmtRelative)),
+    assert_percentage(pget(ets_tables_memory, ResultMgmtRelative)),
+
+    ResultUnknownFilter = http_get(Config, Path ++ "/blahblah", ?OK),
+    [{ets_tables_memory, <<"no_tables">>}] = ResultUnknownFilter,
+    passed.
+
+assert_percentage(Breakdown) ->
+    Total = lists:sum([P || {K, P} <- Breakdown, K =/= total]),
+    Count = length(Breakdown) - 1,
+    %% Rounding up and down can lose some digits. Never more than the number
+    %% of items in the breakdown.
+    case ((Total =< 100 + Count) andalso (Total >= 100 - Count)) of
+        false ->
+            throw({bad_percentage, Total, Breakdown});
+        true ->
+            ok
+    end.
+
+auth_test(Config) ->
+    http_put(Config, "/users/user", [{password, <<"user">>},
+                                     {tags, <<"">>}], [?CREATED, ?NO_CONTENT]),
+    test_auth(Config, ?NOT_AUTHORISED, []),
+    test_auth(Config, ?NOT_AUTHORISED, [auth_header("user", "user")]),
+    test_auth(Config, ?NOT_AUTHORISED, [auth_header("guest", "gust")]),
+    test_auth(Config, ?OK, [auth_header("guest", "guest")]),
+    http_delete(Config, "/users/user", ?NO_CONTENT),
+    passed.
+
+%% This test is rather over-verbose as we're trying to test understanding of
+%% Webmachine
+vhosts_test(Config) ->
+    assert_list([[{name, <<"/">>}]], http_get(Config, "/vhosts")),
+    %% Create a new one
+    http_put(Config, "/vhosts/myvhost", none, [?CREATED, ?NO_CONTENT]),
+    %% PUT should be idempotent
+    http_put(Config, "/vhosts/myvhost", none, ?NO_CONTENT),
+    %% Check it's there
+    assert_list([[{name, <<"/">>}], [{name, <<"myvhost">>}]],
+                http_get(Config, "/vhosts")),
+    %% Check individually
+    assert_item([{name, <<"/">>}], http_get(Config, "/vhosts/%2f", ?OK)),
+    assert_item([{name, <<"myvhost">>}],http_get(Config, "/vhosts/myvhost")),
+    %% Delete it
+    http_delete(Config, "/vhosts/myvhost", ?NO_CONTENT),
+    %% It's not there
+    http_get(Config, "/vhosts/myvhost", ?NOT_FOUND),
+    http_delete(Config, "/vhosts/myvhost", ?NOT_FOUND),
+
+    passed.
+
+vhosts_trace_test(Config) ->
+    http_put(Config, "/vhosts/myvhost", none, [?CREATED, ?NO_CONTENT]),
+    Disabled = [{name,  <<"myvhost">>}, {tracing, false}],
+    Enabled  = [{name,  <<"myvhost">>}, {tracing, true}],
+    Disabled = http_get(Config, "/vhosts/myvhost"),
+    http_put(Config, "/vhosts/myvhost", [{tracing, true}], ?NO_CONTENT),
+    Enabled = http_get(Config, "/vhosts/myvhost"),
+    http_put(Config, "/vhosts/myvhost", [{tracing, true}], ?NO_CONTENT),
+    Enabled = http_get(Config, "/vhosts/myvhost"),
+    http_put(Config, "/vhosts/myvhost", [{tracing, false}], ?NO_CONTENT),
+    Disabled = http_get(Config, "/vhosts/myvhost"),
+    http_delete(Config, "/vhosts/myvhost", ?NO_CONTENT),
+
+    passed.
+
+users_test(Config) ->
+    assert_item([{name, <<"guest">>}, {tags, <<"administrator">>}],
+                http_get(Config, "/whoami")),
+    http_get(Config, "/users/myuser", ?NOT_FOUND),
+    http_put_raw(Config, "/users/myuser", "Something not JSON", ?BAD_REQUEST),
+    http_put(Config, "/users/myuser", [{flim, <<"flam">>}], ?BAD_REQUEST),
+    http_put(Config, "/users/myuser", [{tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/users/myuser", [{password_hash, <<"not_hash">>}], ?BAD_REQUEST),
+    http_put(Config, "/users/myuser", [{password_hash,
+                                        <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
+                                       {tags, <<"management">>}], ?NO_CONTENT),
+    assert_item([{name, <<"myuser">>}, {tags, <<"management">>},
+                 {password_hash, <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
+                 {hashing_algorithm, <<"rabbit_password_hashing_sha256">>}],
+                http_get(Config, "/users/myuser")),
+
+    http_put(Config, "/users/myuser", [{password_hash,
+                                        <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
+                                       {hashing_algorithm, <<"rabbit_password_hashing_md5">>},
+                                       {tags, <<"management">>}], ?NO_CONTENT),
+    assert_item([{name, <<"myuser">>}, {tags, <<"management">>},
+                 {password_hash, <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
+                 {hashing_algorithm, <<"rabbit_password_hashing_md5">>}],
+                http_get(Config, "/users/myuser")),
+    http_put(Config, "/users/myuser", [{password, <<"password">>},
+                                       {tags, <<"administrator, foo">>}], ?NO_CONTENT),
+    assert_item([{name, <<"myuser">>}, {tags, <<"administrator,foo">>}],
+                http_get(Config, "/users/myuser")),
+    assert_list([[{name, <<"myuser">>}, {tags, <<"administrator,foo">>}],
+                 [{name, <<"guest">>}, {tags, <<"administrator">>}]],
+                http_get(Config, "/users")),
+    test_auth(Config, ?OK, [auth_header("myuser", "password")]),
+    http_delete(Config, "/users/myuser", ?NO_CONTENT),
+    test_auth(Config, ?NOT_AUTHORISED, [auth_header("myuser", "password")]),
+    http_get(Config, "/users/myuser", ?NOT_FOUND),
+    passed.
+
+users_legacy_administrator_test(Config) ->
+    http_put(Config, "/users/myuser1", [{administrator, <<"true">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/users/myuser2", [{administrator, <<"false">>}], [?CREATED, ?NO_CONTENT]),
+    assert_item([{name, <<"myuser1">>}, {tags, <<"administrator">>}],
+                http_get(Config, "/users/myuser1")),
+    assert_item([{name, <<"myuser2">>}, {tags, <<"">>}],
+                http_get(Config, "/users/myuser2")),
+    http_delete(Config, "/users/myuser1", ?NO_CONTENT),
+    http_delete(Config, "/users/myuser2", ?NO_CONTENT),
+    passed.
+
+permissions_validation_test(Config) ->
+    Good = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+    http_put(Config, "/permissions/wrong/guest", Good, ?BAD_REQUEST),
+    http_put(Config, "/permissions/%2f/wrong", Good, ?BAD_REQUEST),
+    http_put(Config, "/permissions/%2f/guest",
+             [{configure, <<"[">>}, {write, <<".*">>}, {read, <<".*">>}],
+             ?BAD_REQUEST),
+    http_put(Config, "/permissions/%2f/guest", Good, ?NO_CONTENT),
+    passed.
+
+%% Verify listing of permissions: initially only guest's grant on "/" exists;
+%% after creating two users and two vhosts with three grants between them,
+%% /permissions reports 4 entries (3 new + guest's default) and the per-user
+%% listings report the expected counts. Unknown users/vhosts yield 404.
+%% All created objects are deleted at the end.
+permissions_list_test(Config) ->
+    %% Pattern-match asserts the initial state: exactly guest on "/".
+    [[{user,<<"guest">>},
+      {vhost,<<"/">>},
+      {configure,<<".*">>},
+      {write,<<".*">>},
+      {read,<<".*">>}]] =
+        http_get(Config, "/permissions"),
+
+    http_put(Config, "/users/myuser1", [{password, <<"">>}, {tags, <<"administrator">>}],
+             [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/users/myuser2", [{password, <<"">>}, {tags, <<"administrator">>}],
+             [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/vhosts/myvhost1", none, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/vhosts/myvhost2", none, [?CREATED, ?NO_CONTENT]),
+
+    Perms = [{configure, <<"foo">>}, {write, <<"foo">>}, {read, <<"foo">>}],
+    http_put(Config, "/permissions/myvhost1/myuser1", Perms, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/myvhost2/myuser1", Perms, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/myvhost1/myuser2", Perms, [?CREATED, ?NO_CONTENT]),
+
+    %% 3 new grants + guest's default grant on "/".
+    4 = length(http_get(Config, "/permissions")),
+    2 = length(http_get(Config, "/users/myuser1/permissions")),
+    1 = length(http_get(Config, "/users/myuser2/permissions")),
+
+    http_get(Config, "/users/notmyuser/permissions", ?NOT_FOUND),
+    http_get(Config, "/vhosts/notmyvhost/permissions", ?NOT_FOUND),
+
+    %% Clean up so later tests see the pristine default state.
+    http_delete(Config, "/users/myuser1", ?NO_CONTENT),
+    http_delete(Config, "/users/myuser2", ?NO_CONTENT),
+    http_delete(Config, "/vhosts/myvhost1", ?NO_CONTENT),
+    http_delete(Config, "/vhosts/myvhost2", ?NO_CONTENT),
+    passed.
+
+%% Round-trip a single permission grant: create user + vhost + grant, read
+%% the grant back both directly and via the /permissions and per-user
+%% listings, then delete it and confirm 404. Created objects are removed.
+permissions_test(Config) ->
+    http_put(Config, "/users/myuser", [{password, <<"myuser">>}, {tags, <<"administrator">>}],
+             [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/vhosts/myvhost", none, [?CREATED, ?NO_CONTENT]),
+
+    http_put(Config, "/permissions/myvhost/myuser",
+             [{configure, <<"foo">>}, {write, <<"foo">>}, {read, <<"foo">>}],
+             [?CREATED, ?NO_CONTENT]),
+
+    Permission = [{user,<<"myuser">>},
+                  {vhost,<<"myvhost">>},
+                  {configure,<<"foo">>},
+                  {write,<<"foo">>},
+                  {read,<<"foo">>}],
+    %% Default is guest's pre-existing grant on "/".
+    Default = [{user,<<"guest">>},
+               {vhost,<<"/">>},
+               {configure,<<".*">>},
+               {write,<<".*">>},
+               {read,<<".*">>}],
+    Permission = http_get(Config, "/permissions/myvhost/myuser"),
+    assert_list([Permission, Default], http_get(Config, "/permissions")),
+    assert_list([Permission], http_get(Config, "/users/myuser/permissions")),
+    http_delete(Config, "/permissions/myvhost/myuser", ?NO_CONTENT),
+    http_get(Config, "/permissions/myvhost/myuser", ?NOT_FOUND),
+
+    http_delete(Config, "/users/myuser", ?NO_CONTENT),
+    http_delete(Config, "/vhosts/myvhost", ?NO_CONTENT),
+    passed.
+
+%% Open an AMQP connection, look it up via its mgmt URI (derived from the
+%% client's local port and the broker's AMQP port), force-close it over
+%% HTTP, and confirm it disappears from the API.
+connections_test(Config) ->
+    {Conn, _Ch} = open_connection_and_channel(Config),
+    LocalPort = local_port(Conn),
+    %% Connection names are "client -> server" with ':' and ' ' URL-encoded.
+    Path = binary_to_list(
+             rabbit_mgmt_format:print(
+               "/connections/127.0.0.1%3A~w%20->%20127.0.0.1%3A~w",
+               [LocalPort, amqp_port(Config)])),
+    http_get(Config, Path, ?OK),
+    http_delete(Config, Path, ?NO_CONTENT),
+    %% TODO rabbit_reader:shutdown/2 returns before the connection is
+    %% closed. It may not be worth fixing.
+    timer:sleep(200),
+    http_get(Config, Path, ?NOT_FOUND),
+    close_connection(Conn),
+    passed.
+
+%% Spawn 100 invalid (malformed) connection attempts and assert that the
+%% paged /connections listing reports a total_count of 0 both while the
+%% attempts are in flight and after they have all been answered — i.e.
+%% bad handshakes never surface as tracked connections.
+multiple_invalid_connections_test(Config) ->
+    Count = 100,
+    spawn_invalid(Config, Count),
+    Page0 = http_get(Config, "/connections?page=1&page_size=100", ?OK),
+    wait_for_answers(Count),
+    Page1 = http_get(Config, "/connections?page=1&page_size=100", ?OK),
+    ?assertEqual(0, proplists:get_value(total_count, Page0)),
+    ?assertEqual(0, proplists:get_value(total_count, Page1)),
+    passed.
+
+%% Helper: issue GET /overview with the given request Headers and assert
+%% the HTTP status equals Code (bound via the pattern match).
+test_auth(Config, Code, Headers) ->
+    {ok, {{_, Code, _}, _, _}} = req(Config, get, "/overview", Headers),
+    passed.
+
+%% Exercise the /exchanges resource: authorization (401 before the guest
+%% grant exists on the new vhost), idempotent creation, attribute
+%% round-trip, validation errors (bad vhost, bad type, non-boolean
+%% durable, conflicting redeclaration), and deletion semantics.
+exchanges_test(Config) ->
+    %% Can pass booleans or strings
+    Good = [{type, <<"direct">>}, {durable, <<"true">>}],
+    http_put(Config, "/vhosts/myvhost", none, [?CREATED, ?NO_CONTENT]),
+    %% No permissions on myvhost yet, so guest is refused.
+    http_get(Config, "/exchanges/myvhost/foo", ?NOT_AUTHORISED),
+    http_put(Config, "/exchanges/myvhost/foo", Good, ?NOT_AUTHORISED),
+    http_put(Config, "/permissions/myvhost/guest",
+             [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+             [?CREATED, ?NO_CONTENT]),
+    http_get(Config, "/exchanges/myvhost/foo", ?NOT_FOUND),
+    http_put(Config, "/exchanges/myvhost/foo", Good, [?CREATED, ?NO_CONTENT]),
+    %% Re-declaring with identical properties is a no-op (204).
+    http_put(Config, "/exchanges/myvhost/foo", Good, ?NO_CONTENT),
+    %% The exchange lives in myvhost, not in "/".
+    http_get(Config, "/exchanges/%2f/foo", ?NOT_FOUND),
+    assert_item([{name,<<"foo">>},
+                 {vhost,<<"myvhost">>},
+                 {type,<<"direct">>},
+                 {durable,true},
+                 {auto_delete,false},
+                 {internal,false},
+                 {arguments,[]}],
+                http_get(Config, "/exchanges/myvhost/foo")),
+
+    http_put(Config, "/exchanges/badvhost/bar", Good, ?NOT_FOUND),
+    http_put(Config, "/exchanges/myvhost/bar", [{type, <<"bad_exchange_type">>}],
+             ?BAD_REQUEST),
+    http_put(Config, "/exchanges/myvhost/bar", [{type, <<"direct">>},
+                                                {durable, <<"troo">>}],
+             ?BAD_REQUEST),
+    %% Redeclaration with different properties (durable defaulting) fails.
+    http_put(Config, "/exchanges/myvhost/foo", [{type, <<"direct">>}],
+             ?BAD_REQUEST),
+
+    http_delete(Config, "/exchanges/myvhost/foo", ?NO_CONTENT),
+    http_delete(Config, "/exchanges/myvhost/foo", ?NOT_FOUND),
+
+    http_delete(Config, "/vhosts/myvhost", ?NO_CONTENT),
+    http_get(Config, "/exchanges/badvhost", ?NOT_FOUND),
+    passed.
+
+%% Exercise the /queues resource on the default vhost: idempotent
+%% creation, validation errors (bad vhost, non-boolean durable,
+%% redeclaration with conflicting durability), listing of all queues,
+%% single-queue attribute round-trip, and deletion semantics.
+queues_test(Config) ->
+    Good = [{durable, true}],
+    http_get(Config, "/queues/%2f/foo", ?NOT_FOUND),
+    http_put(Config, "/queues/%2f/foo", Good, [?CREATED, ?NO_CONTENT]),
+    %% Re-declaring with identical properties is a no-op (204).
+    http_put(Config, "/queues/%2f/foo", Good, ?NO_CONTENT),
+    http_get(Config, "/queues/%2f/foo", ?OK),
+
+    http_put(Config, "/queues/badvhost/bar", Good, ?NOT_FOUND),
+    http_put(Config, "/queues/%2f/bar",
+             [{durable, <<"troo">>}],
+             ?BAD_REQUEST),
+    %% Conflicting durability on an existing queue is rejected.
+    http_put(Config, "/queues/%2f/foo",
+             [{durable, false}],
+             ?BAD_REQUEST),
+
+    http_put(Config, "/queues/%2f/baz", Good, [?CREATED, ?NO_CONTENT]),
+
+    Queues = http_get(Config, "/queues/%2f"),
+    Queue = http_get(Config, "/queues/%2f/foo"),
+    assert_list([[{name,        <<"foo">>},
+                  {vhost,       <<"/">>},
+                  {durable,     true},
+                  {auto_delete, false},
+                  {exclusive,   false},
+                  {arguments,   []}],
+                 [{name,        <<"baz">>},
+                  {vhost,       <<"/">>},
+                  {durable,     true},
+                  {auto_delete, false},
+                  {exclusive,   false},
+                  {arguments,   []}]], Queues),
+    assert_item([{name,        <<"foo">>},
+                 {vhost,       <<"/">>},
+                 {durable,     true},
+                 {auto_delete, false},
+                 {exclusive,   false},
+                 {arguments,   []}], Queue),
+
+    http_delete(Config, "/queues/%2f/foo", ?NO_CONTENT),
+    http_delete(Config, "/queues/%2f/baz", ?NO_CONTENT),
+    http_delete(Config, "/queues/%2f/foo", ?NOT_FOUND),
+    http_get(Config, "/queues/badvhost", ?NOT_FOUND),
+    passed.
+
+%% Exchange-to-queue bindings: create one via POST, read it back through
+%% every listing endpoint (per-binding, per-pair, per-queue — which also
+%% includes the implicit default-exchange binding — and per-exchange),
+%% then delete it and verify 404s, including after the queue is gone.
+bindings_test(Config) ->
+    XArgs = [{type, <<"direct">>}],
+    QArgs = [],
+    http_put(Config, "/exchanges/%2f/myexchange", XArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
+    BArgs = [{routing_key, <<"routing">>}, {arguments, []}],
+    http_post(Config, "/bindings/%2f/e/myexchange/q/myqueue", BArgs, [?CREATED, ?NO_CONTENT]),
+    http_get(Config, "/bindings/%2f/e/myexchange/q/myqueue/routing", ?OK),
+    http_get(Config, "/bindings/%2f/e/myexchange/q/myqueue/rooting", ?NOT_FOUND),
+    Binding =
+        [{source,<<"myexchange">>},
+         {vhost,<<"/">>},
+         {destination,<<"myqueue">>},
+         {destination_type,<<"queue">>},
+         {routing_key,<<"routing">>},
+         {arguments,[]},
+         {properties_key,<<"routing">>}],
+    %% DBinding is the implicit binding from the default exchange ("")
+    %% that every queue gets, keyed by the queue name.
+    DBinding =
+        [{source,<<"">>},
+         {vhost,<<"/">>},
+         {destination,<<"myqueue">>},
+         {destination_type,<<"queue">>},
+         {routing_key,<<"myqueue">>},
+         {arguments,[]},
+         {properties_key,<<"myqueue">>}],
+    Binding = http_get(Config, "/bindings/%2f/e/myexchange/q/myqueue/routing"),
+    assert_list([Binding],
+                http_get(Config, "/bindings/%2f/e/myexchange/q/myqueue")),
+    assert_list([Binding, DBinding],
+                http_get(Config, "/queues/%2f/myqueue/bindings")),
+    assert_list([Binding],
+                http_get(Config, "/exchanges/%2f/myexchange/bindings/source")),
+    http_delete(Config, "/bindings/%2f/e/myexchange/q/myqueue/routing", ?NO_CONTENT),
+    http_delete(Config, "/bindings/%2f/e/myexchange/q/myqueue/routing", ?NOT_FOUND),
+    http_delete(Config, "/exchanges/%2f/myexchange", ?NO_CONTENT),
+    http_delete(Config, "/queues/%2f/myqueue", ?NO_CONTENT),
+    http_get(Config, "/bindings/badvhost", ?NOT_FOUND),
+    http_get(Config, "/bindings/badvhost/myqueue/myexchange/routing", ?NOT_FOUND),
+    http_get(Config, "/bindings/%2f/e/myexchange/q/myqueue/routing", ?NOT_FOUND),
+    passed.
+
+%% POSTing bindings: missing endpoints yield 404; an argument-less POST
+%% produces the "~" properties key in the Location header, while a POST
+%% with arguments produces a hashed properties key, which is then used
+%% to GET and DELETE the binding.
+bindings_post_test(Config) ->
+    XArgs = [{type, <<"direct">>}],
+    QArgs = [],
+    BArgs = [{routing_key, <<"routing">>}, {arguments, [{foo, <<"bar">>}]}],
+    http_put(Config, "/exchanges/%2f/myexchange", XArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_post(Config, "/bindings/%2f/e/myexchange/q/badqueue", BArgs, ?NOT_FOUND),
+    http_post(Config, "/bindings/%2f/e/badexchange/q/myqueue", BArgs, ?NOT_FOUND),
+    Headers1 = http_post(Config, "/bindings/%2f/e/myexchange/q/myqueue", [], [?CREATED, ?NO_CONTENT]),
+    "../../../../%2F/e/myexchange/q/myqueue/~" = pget("location", Headers1),
+    Headers2 = http_post(Config, "/bindings/%2f/e/myexchange/q/myqueue", BArgs, [?CREATED, ?NO_CONTENT]),
+    %% Expected key: routing key plus hash of the binding arguments
+    %% (value pinned here; assumed stable across runs — derived from BArgs).
+    PropertiesKey = "routing~V4mGFgnPNrdtRmluZIxTDA",
+    PropertiesKeyBin = list_to_binary(PropertiesKey),
+    "../../../../%2F/e/myexchange/q/myqueue/" ++ PropertiesKey =
+        pget("location", Headers2),
+    URI = "/bindings/%2F/e/myexchange/q/myqueue/" ++ PropertiesKey,
+    [{source,<<"myexchange">>},
+     {vhost,<<"/">>},
+     {destination,<<"myqueue">>},
+     {destination_type,<<"queue">>},
+     {routing_key,<<"routing">>},
+     {arguments,[{foo,<<"bar">>}]},
+     {properties_key,PropertiesKeyBin}] = http_get(Config, URI, ?OK),
+    http_get(Config, URI ++ "x", ?NOT_FOUND),
+    http_delete(Config, URI, ?NO_CONTENT),
+    http_delete(Config, "/exchanges/%2f/myexchange", ?NO_CONTENT),
+    http_delete(Config, "/queues/%2f/myqueue", ?NO_CONTENT),
+    passed.
+
+%% Exchange-to-exchange bindings: 404s for missing endpoints, Location
+%% header on creation, attribute round-trip, and listing via the binding,
+%% source-exchange and destination-exchange endpoints. Uses the built-in
+%% amq.* exchanges so no setup/teardown of exchanges is needed.
+bindings_e2e_test(Config) ->
+    BArgs = [{routing_key, <<"routing">>}, {arguments, []}],
+    http_post(Config, "/bindings/%2f/e/amq.direct/e/badexchange", BArgs, ?NOT_FOUND),
+    http_post(Config, "/bindings/%2f/e/badexchange/e/amq.fanout", BArgs, ?NOT_FOUND),
+    Headers = http_post(Config, "/bindings/%2f/e/amq.direct/e/amq.fanout", BArgs, [?CREATED, ?NO_CONTENT]),
+    "../../../../%2F/e/amq.direct/e/amq.fanout/routing" =
+        pget("location", Headers),
+    [{source,<<"amq.direct">>},
+     {vhost,<<"/">>},
+     {destination,<<"amq.fanout">>},
+     {destination_type,<<"exchange">>},
+     {routing_key,<<"routing">>},
+     {arguments,[]},
+     {properties_key,<<"routing">>}] =
+        http_get(Config, "/bindings/%2f/e/amq.direct/e/amq.fanout/routing", ?OK),
+    http_delete(Config, "/bindings/%2f/e/amq.direct/e/amq.fanout/routing", ?NO_CONTENT),
+    http_post(Config, "/bindings/%2f/e/amq.direct/e/amq.headers", BArgs, [?CREATED, ?NO_CONTENT]),
+    Binding =
+        [{source,<<"amq.direct">>},
+         {vhost,<<"/">>},
+         {destination,<<"amq.headers">>},
+         {destination_type,<<"exchange">>},
+         {routing_key,<<"routing">>},
+         {arguments,[]},
+         {properties_key,<<"routing">>}],
+    Binding = http_get(Config, "/bindings/%2f/e/amq.direct/e/amq.headers/routing"),
+    assert_list([Binding],
+                http_get(Config, "/bindings/%2f/e/amq.direct/e/amq.headers")),
+    assert_list([Binding],
+                http_get(Config, "/exchanges/%2f/amq.direct/bindings/source")),
+    assert_list([Binding],
+                http_get(Config, "/exchanges/%2f/amq.headers/bindings/destination")),
+    http_delete(Config, "/bindings/%2f/e/amq.direct/e/amq.headers/routing", ?NO_CONTENT),
+    http_get(Config, "/bindings/%2f/e/amq.direct/e/amq.headers/rooting", ?NOT_FOUND),
+    passed.
+
+%% Administrator-only endpoints: a "management"-tagged user (notadmin,
+%% first created as administrator then downgraded) gets 401 on admin
+%% resources, while isadmin and the default guest (administrator) get 200.
+permissions_administrator_test(Config) ->
+    http_put(Config, "/users/isadmin", [{password, <<"isadmin">>},
+                                        {tags, <<"administrator">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/users/notadmin", [{password, <<"notadmin">>},
+                                         {tags, <<"administrator">>}], [?CREATED, ?NO_CONTENT]),
+    %% Downgrade notadmin to the management tag before testing.
+    http_put(Config, "/users/notadmin", [{password, <<"notadmin">>},
+                                         {tags, <<"management">>}], ?NO_CONTENT),
+    Test =
+        fun(Path) ->
+                http_get(Config, Path, "notadmin", "notadmin", ?NOT_AUTHORISED),
+                http_get(Config, Path, "isadmin", "isadmin", ?OK),
+                http_get(Config, Path, "guest", "guest", ?OK)
+        end,
+    %% All users can get a list of vhosts. It may be filtered.
+    %%Test("/vhosts"),
+    Test("/vhosts/%2f"),
+    Test("/vhosts/%2f/permissions"),
+    Test("/users"),
+    Test("/users/guest"),
+    Test("/users/guest/permissions"),
+    Test("/permissions"),
+    Test("/permissions/%2f/guest"),
+    http_delete(Config, "/users/notadmin", ?NO_CONTENT),
+    http_delete(Config, "/users/isadmin", ?NO_CONTENT),
+    passed.
+
+%% Vhost-scoped visibility: myuser has permissions only on myvhost1, so
+%% listings must never surface myvhost2 objects (Test1) and direct access
+%% to myvhost2 resources must return 401 while myvhost1 returns 200
+%% (Test2). Covers exchanges, queues and bindings endpoints.
+permissions_vhost_test(Config) ->
+    QArgs = [],
+    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+    http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+                                       {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/vhosts/myvhost1", none, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/vhosts/myvhost2", none, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/myvhost1/myuser", PermArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/myvhost1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/myvhost2/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
+    %% guest (administrator) sees all vhosts; myuser sees only myvhost1.
+    assert_list([[{name, <<"/">>}],
+                 [{name, <<"myvhost1">>}],
+                 [{name, <<"myvhost2">>}]], http_get(Config, "/vhosts", ?OK)),
+    assert_list([[{name, <<"myvhost1">>}]],
+                http_get(Config, "/vhosts", "myuser", "myuser", ?OK)),
+    http_put(Config, "/queues/myvhost1/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/myvhost2/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
+    %% Test1: a listing fetched as myuser must contain nothing from myvhost2.
+    Test1 =
+        fun(Path) ->
+                Results = http_get(Config, Path, "myuser", "myuser", ?OK),
+                [case pget(vhost, Result) of
+                     <<"myvhost2">> ->
+                         throw({got_result_from_vhost2_in, Path, Result});
+                     _ ->
+                         ok
+                 end || Result <- Results]
+        end,
+    %% Test2: direct access — 200 on myvhost1, 401 on myvhost2.
+    Test2 =
+        fun(Path1, Path2) ->
+                http_get(Config, Path1 ++ "/myvhost1/" ++ Path2, "myuser", "myuser",
+                         ?OK),
+                http_get(Config, Path1 ++ "/myvhost2/" ++ Path2, "myuser", "myuser",
+                         ?NOT_AUTHORISED)
+        end,
+    Test1("/exchanges"),
+    Test2("/exchanges", ""),
+    Test2("/exchanges", "amq.direct"),
+    Test1("/queues"),
+    Test2("/queues", ""),
+    Test2("/queues", "myqueue"),
+    Test1("/bindings"),
+    Test2("/bindings", ""),
+    Test2("/queues", "myqueue/bindings"),
+    Test2("/exchanges", "amq.default/bindings/source"),
+    Test2("/exchanges", "amq.default/bindings/destination"),
+    Test2("/bindings", "e/amq.default/q/myqueue"),
+    Test2("/bindings", "e/amq.default/q/myqueue/myqueue"),
+    http_delete(Config, "/vhosts/myvhost1", ?NO_CONTENT),
+    http_delete(Config, "/vhosts/myvhost2", ?NO_CONTENT),
+    http_delete(Config, "/users/myuser", ?NO_CONTENT),
+    passed.
+
+%% AMQP-level permission enforcement through the HTTP API: myuser's grant
+%% only covers names matching "foo.*", so declaring "bar-queue" is 401;
+%% a nonexistent user is also 401.
+permissions_amqp_test(Config) ->
+    %% Just test that it works at all, not that it works in all possible cases.
+    QArgs = [],
+    PermArgs = [{configure, <<"foo.*">>}, {write, <<"foo.*">>},
+                {read,      <<"foo.*">>}],
+    http_put(Config, "/users/myuser", [{password, <<"myuser">>},
+                                       {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/%2f/myuser", PermArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/bar-queue", QArgs, "myuser", "myuser",
+             ?NOT_AUTHORISED),
+    http_put(Config, "/queues/%2f/bar-queue", QArgs, "nonexistent", "nonexistent",
+             ?NOT_AUTHORISED),
+    http_delete(Config, "/users/myuser", ?NO_CONTENT),
+    passed.
+
+%% Opens a new connection (to broker node 0) and a channel on it.
+%% The channel is not managed by rabbit_ct_client_helpers and
+%% should be explicitly closed by the caller.
+%% Returns {Conn, Ch}.
+open_connection_and_channel(Config) ->
+    Conn = rabbit_ct_client_helpers:open_connection(Config, 0),
+    {ok, Ch}   = amqp_connection:open_channel(Conn),
+    {Conn, Ch}.
+
+%% Open an AMQP network connection authenticated as Username/Password and
+%% precompute the management-API paths that identify it: the connection
+%% itself, its channel number 1, and its channel listing. Paths embed the
+%% client's local port and the broker port, URL-encoded ("%3A" = ':',
+%% "%20" = ' '). Returns {Conn, ConnPath, ChPath, ConnChPath}.
+get_conn(Config, Username, Password) ->
+    Port       = amqp_port(Config),
+    {ok, Conn} = amqp_connection:start(#amqp_params_network{
+                                          port     = Port,
+                                         username = list_to_binary(Username),
+                                         password = list_to_binary(Password)}),
+    LocalPort = local_port(Conn),
+    ConnPath = rabbit_misc:format(
+                 "/connections/127.0.0.1%3A~w%20->%20127.0.0.1%3A~w",
+                 [LocalPort, Port]),
+    ChPath = rabbit_misc:format(
+               "/channels/127.0.0.1%3A~w%20->%20127.0.0.1%3A~w%20(1)",
+               [LocalPort, Port]),
+    ConnChPath = rabbit_misc:format(
+                   "/connections/127.0.0.1%3A~w%20->%20127.0.0.1%3A~w/channels",
+                   [LocalPort, Port]),
+    {Conn, ConnPath, ChPath, ConnChPath}.
+
+%% Visibility and control of connections/channels/consumers by user role:
+%% a "management" user sees only its own objects (length 1), while
+%% "monitoring" and administrator (guest) users see all three; reading
+%% another user's connection/channel is 401 for the management user but
+%% allowed for monitor/guest; only an authorized user may force-close a
+%% connection.
+permissions_connection_channel_consumer_test(Config) ->
+    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+    http_put(Config, "/users/user", [{password, <<"user">>},
+                                     {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/%2f/user", PermArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/users/monitor", [{password, <<"monitor">>},
+                                        {tags, <<"monitoring">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/%2f/monitor", PermArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/test", [], [?CREATED, ?NO_CONTENT]),
+
+    %% One connection + one extra channel + one consumer per user.
+    {Conn1, UserConn, UserCh, UserConnCh} = get_conn(Config, "user", "user"),
+    {Conn2, MonConn, MonCh, MonConnCh} = get_conn(Config, "monitor", "monitor"),
+    {Conn3, AdmConn, AdmCh, AdmConnCh} = get_conn(Config, "guest", "guest"),
+    {ok, Ch1} = amqp_connection:open_channel(Conn1),
+    {ok, Ch2} = amqp_connection:open_channel(Conn2),
+    {ok, Ch3} = amqp_connection:open_channel(Conn3),
+    [amqp_channel:subscribe(
+       Ch, #'basic.consume'{queue = <<"test">>}, self()) ||
+        Ch <- [Ch1, Ch2, Ch3]],
+    AssertLength = fun (Path, User, Len) ->
+                           ?assertEqual(Len,
+                                        length(http_get(Config, Path, User, User, ?OK)))
+                   end,
+    %% management sees only its own; monitoring and admin see all 3.
+    [begin
+         AssertLength(P, "user", 1),
+         AssertLength(P, "monitor", 3),
+         AssertLength(P, "guest", 3)
+     end || P <- ["/connections", "/channels", "/consumers", "/consumers/%2f"]],
+
+    %% UserStatus is what the management user gets; monitor and the
+    %% default (guest) always get 200.
+    AssertRead = fun(Path, UserStatus) ->
+                         http_get(Config, Path, "user", "user", UserStatus),
+                         http_get(Config, Path, "monitor", "monitor", ?OK),
+                         http_get(Config, Path, ?OK)
+                 end,
+    AssertRead(UserConn, ?OK),
+    AssertRead(MonConn, ?NOT_AUTHORISED),
+    AssertRead(AdmConn, ?NOT_AUTHORISED),
+    AssertRead(UserCh, ?OK),
+    AssertRead(MonCh, ?NOT_AUTHORISED),
+    AssertRead(AdmCh, ?NOT_AUTHORISED),
+    AssertRead(UserConnCh, ?OK),
+    AssertRead(MonConnCh, ?NOT_AUTHORISED),
+    AssertRead(AdmConnCh, ?NOT_AUTHORISED),
+
+    AssertClose = fun(Path, User, Status) ->
+                          http_delete(Config, Path, User, User, Status)
+                  end,
+    AssertClose(UserConn, "monitor", ?NOT_AUTHORISED),
+    AssertClose(MonConn, "user", ?NOT_AUTHORISED),
+    AssertClose(AdmConn, "guest", ?NO_CONTENT),
+    AssertClose(MonConn, "guest", ?NO_CONTENT),
+    AssertClose(UserConn, "user", ?NO_CONTENT),
+
+    http_delete(Config, "/users/user", ?NO_CONTENT),
+    http_delete(Config, "/users/monitor", ?NO_CONTENT),
+    http_get(Config, "/connections/foo", ?NOT_FOUND),
+    http_get(Config, "/channels/foo", ?NOT_FOUND),
+    http_delete(Config, "/queues/%2f/test", ?NO_CONTENT),
+    passed.
+
+
+
+
+%% Verify that a consumer subscribed over AMQP (with an explicit tag and
+%% no_ack=false) shows up in the /consumers listing with the matching
+%% exclusive/ack_required/consumer_tag fields.
+consumers_test(Config) ->
+    http_put(Config, "/queues/%2f/test", [], [?CREATED, ?NO_CONTENT]),
+    {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn(Config, "guest", "guest"),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    amqp_channel:subscribe(
+      Ch, #'basic.consume'{queue        = <<"test">>,
+                           no_ack       = false,
+                           consumer_tag = <<"my-ctag">> }, self()),
+    %% no_ack=false maps to ack_required=true in the API.
+    assert_list([[{exclusive,    false},
+                  {ack_required, true},
+                  {consumer_tag, <<"my-ctag">>}]], http_get(Config, "/consumers")),
+    amqp_connection:close(Conn),
+    http_delete(Config, "/queues/%2f/test", ?NO_CONTENT),
+    passed.
+
+%% Convenience wrapper around defs/6 using a plain HTTP DELETE (expecting
+%% 204) as the cleanup function.
+defs(Config, Key, URI, CreateMethod, Args) ->
+    defs(Config, Key, URI, CreateMethod, Args,
+         fun(URI2) -> http_delete(Config, URI2, ?NO_CONTENT) end).
+
+%% Run the defs/6 export/import round-trip twice: once against the default
+%% vhost and once against a freshly created "test" vhost. Rep1 substitutes
+%% "<vhost>" in the URI template; Rep2 rewrites any 'vhost' placeholder
+%% value in Args. The "test" vhost is deleted as part of the second run's
+%% cleanup function.
+defs_v(Config, Key, URI, CreateMethod, Args) ->
+    Rep1 = fun (S, S2) -> re:replace(S, "<vhost>", S2, [{return, list}]) end,
+    Rep2 = fun (L, V2) -> lists:keymap(fun (vhost) -> V2;
+                                           (V)     -> V end, 2, L) end,
+    %% Test against default vhost
+    defs(Config, Key, Rep1(URI, "%2f"), CreateMethod, Rep2(Args, <<"/">>)),
+
+    %% Test against new vhost
+    http_put(Config, "/vhosts/test", none, [?CREATED, ?NO_CONTENT]),
+    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+    http_put(Config, "/permissions/test/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
+    defs(Config, Key, Rep1(URI, "test"), CreateMethod, Rep2(Args, <<"test">>),
+         fun(URI2) -> http_delete(Config, URI2, ?NO_CONTENT),
+                      http_delete(Config, "/vhosts/test", ?NO_CONTENT) end).
+
+%% Create an object via the requested method and return the URI at which
+%% it can subsequently be fetched/deleted:
+%%   put        - PUT Args, expect 201/204; object lives at URI itself.
+%%   put_update - PUT over an existing object, expect 204.
+%%   post       - POST Args; resolve the relative Location response header
+%%                against the request URI to find the created object.
+create(Config, CreateMethod, URI, Args) ->
+    case CreateMethod of
+        put        -> http_put(Config, URI, Args, [?CREATED, ?NO_CONTENT]),
+                      URI;
+        put_update -> http_put(Config, URI, Args, ?NO_CONTENT),
+                      URI;
+        post       -> Headers = http_post(Config, URI, Args, [?CREATED, ?NO_CONTENT]),
+                      rabbit_web_dispatch_util:unrelativise(
+                        URI, pget("location", Headers))
+    end.
+
+%% Core export/import round-trip for one definitions category (Key, e.g.
+%% queues/users/policies): create the item, check it appears under Key in
+%% GET /definitions, delete it, POST the captured definitions back and
+%% verify the item is recreated intact, then delete it again via DeleteFun.
+defs(Config, Key, URI, CreateMethod, Args, DeleteFun) ->
+    %% Create the item
+    URI2 = create(Config, CreateMethod, URI, Args),
+    %% Make sure it ends up in definitions
+    Definitions = http_get(Config, "/definitions", ?OK),
+    true = lists:any(fun(I) -> test_item(Args, I) end, pget(Key, Definitions)),
+
+    %% Delete it
+    DeleteFun(URI2),
+
+    %% Post the definitions back, it should get recreated in correct form
+    http_post(Config, "/definitions", Definitions, ?CREATED),
+    assert_item(Args, http_get(Config, URI2, ?OK)),
+
+    %% And delete it again
+    DeleteFun(URI2),
+
+    passed.
+
+%% Register the test runtime-parameter component and policy validator on
+%% broker node 0 via RPC, so parameter/policy definitions used by the
+%% tests are accepted.
+register_parameters_and_policy_validator(Config) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, register, []),
+    rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, register_policy_validator, []).
+
+%% Undo register_parameters_and_policy_validator/1: unregister the policy
+%% validator and the test runtime-parameter component on broker node 0.
+unregister_parameters_and_policy_validator(Config) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, unregister_policy_validator, []),
+    rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_mgmt_runtime_parameters_util, unregister, []).
+
+%% End-to-end definitions export/import: round-trip every category
+%% (queues, exchanges, bindings, policies, parameters, users, vhosts,
+%% permissions) through GET/POST /definitions via the defs helpers, then
+%% verify that an import containing an invalid exchange type is rejected
+%% with 400.
+definitions_test(Config) ->
+    register_parameters_and_policy_validator(Config),
+
+    defs_v(Config, queues, "/queues/<vhost>/my-queue", put,
+           [{name,    <<"my-queue">>},
+            {durable, true}]),
+    defs_v(Config, exchanges, "/exchanges/<vhost>/my-exchange", put,
+           [{name, <<"my-exchange">>},
+            {type, <<"direct">>}]),
+    defs_v(Config, bindings, "/bindings/<vhost>/e/amq.direct/e/amq.fanout", post,
+           [{routing_key, <<"routing">>}, {arguments, []}]),
+    defs_v(Config, policies, "/policies/<vhost>/my-policy", put,
+           [{vhost,      vhost},
+            {name,       <<"my-policy">>},
+            {pattern,    <<".*">>},
+            {definition, [{testpos, [1, 2, 3]}]},
+            {priority,   1}]),
+    defs_v(Config, parameters, "/parameters/test/<vhost>/good", put,
+           [{vhost,     vhost},
+            {component, <<"test">>},
+            {name,      <<"good">>},
+            {value,     <<"ignore">>}]),
+    defs(Config, users, "/users/myuser", put,
+         [{name,          <<"myuser">>},
+          {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
+          {hashing_algorithm, <<"rabbit_password_hashing_sha256">>},
+          {tags,          <<"management">>}]),
+    defs(Config, vhosts, "/vhosts/myvhost", put,
+         [{name, <<"myvhost">>}]),
+    defs(Config, permissions, "/permissions/%2f/guest", put,
+         [{user,      <<"guest">>},
+          {vhost,     <<"/">>},
+          {configure, <<"c">>},
+          {write,     <<"w">>},
+          {read,      <<"r">>}]),
+
+    %% We just messed with guest's permissions
+    http_put(Config, "/permissions/%2f/guest",
+             [{configure, <<".*">>},
+              {write,     <<".*">>},
+              {read,      <<".*">>}], [?CREATED, ?NO_CONTENT]),
+    %% An import with an unknown exchange type must be rejected outright.
+    BrokenConfig =
+        [{users,       []},
+         {vhosts,      []},
+         {permissions, []},
+         {queues,      []},
+         {exchanges,   [[{name,        <<"amq.direct">>},
+                         {vhost,       <<"/">>},
+                         {type,        <<"definitely not direct">>},
+                         {durable,     true},
+                         {auto_delete, false},
+                         {arguments,   []}
+                        ]]},
+         {bindings,    []}],
+    http_post(Config, "/definitions", BrokenConfig, ?BAD_REQUEST),
+
+    unregister_parameters_and_policy_validator(Config),
+    passed.
+
+%% Per-vhost definitions driver: create a "test" vhost (with guest
+%% permissions) and run the defs_vhost/10 round-trip in both directions —
+%% default vhost exported into "test", then "test" exported into the
+%% default vhost — before removing the "test" vhost. Rep1/Rep2 substitute
+%% the vhost into the URI template and Args as in defs_v/5.
+defs_vhost(Config, Key, URI, CreateMethod, Args) ->
+    Rep1 = fun (S, S2) -> re:replace(S, "<vhost>", S2, [{return, list}]) end,
+    Rep2 = fun (L, V2) -> lists:keymap(fun (vhost) -> V2;
+                                           (V)     -> V end, 2, L) end,
+
+    %% Create test vhost
+    http_put(Config, "/vhosts/test", none, [?CREATED, ?NO_CONTENT]),
+    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+    http_put(Config, "/permissions/test/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
+
+    %% Test against default vhost
+    defs_vhost(Config, Key, URI, Rep1, "%2f", "test", CreateMethod,
+               Rep2(Args, <<"/">>), Rep2(Args, <<"test">>),
+               fun(URI2) -> http_delete(Config, URI2, [?NO_CONTENT, ?CREATED]) end),
+
+    %% Test against test vhost
+    defs_vhost(Config, Key, URI, Rep1, "test", "%2f", CreateMethod,
+               Rep2(Args, <<"test">>), Rep2(Args, <<"/">>),
+               fun(URI2) -> http_delete(Config, URI2, [?NO_CONTENT, ?CREATED]) end),
+
+    %% Remove test vhost
+    http_delete(Config, "/vhosts/test", ?NO_CONTENT).
+
+
+%% Single-direction per-vhost round-trip: create the item in VHost1,
+%% check it appears in GET /definitions/VHost1 (and NOT in VHost2's
+%% export), POST VHost1's definitions into VHost2 and verify the item now
+%% exists there too, then clean up both copies via DeleteFun.
+defs_vhost(Config, Key, URI0, Rep1, VHost1, VHost2, CreateMethod, Args1, Args2,
+           DeleteFun) ->
+    %% Create the item
+    URI2 = create(Config, CreateMethod, Rep1(URI0, VHost1), Args1),
+    %% Make sure it ends up in definitions
+    Definitions = http_get(Config, "/definitions/" ++ VHost1, ?OK),
+    true = lists:any(fun(I) -> test_item(Args1, I) end, pget(Key, Definitions)),
+
+    %% Make sure it is not in the other vhost
+    Definitions0 = http_get(Config, "/definitions/" ++ VHost2, ?OK),
+    false = lists:any(fun(I) -> test_item(Args2, I) end, pget(Key, Definitions0)),
+
+    %% Post the definitions back
+    http_post(Config, "/definitions/" ++ VHost2, Definitions, [?NO_CONTENT, ?CREATED]),
+
+    %% Make sure it is now in the other vhost
+    Definitions1 = http_get(Config, "/definitions/" ++ VHost2, ?OK),
+    true = lists:any(fun(I) -> test_item(Args2, I) end, pget(Key, Definitions1)),
+
+    %% Delete it
+    DeleteFun(URI2),
+    %% Re-create directly in VHost2 and delete again, exercising the
+    %% create path for the target vhost as well.
+    URI3 = create(Config, CreateMethod, Rep1(URI0, VHost2), Args2),
+    DeleteFun(URI3),
+    passed.
+
+%% Per-vhost export/import of queues, exchanges, bindings and policies
+%% via the defs_vhost helper, plus a check that importing into a
+%% nonexistent vhost fails with 400.
+definitions_vhost_test(Config) ->
+    %% Ensures that definitions can be exported/imported from a single virtual
+    %% host to another
+
+    register_parameters_and_policy_validator(Config),
+
+    defs_vhost(Config, queues, "/queues/<vhost>/my-queue", put,
+               [{name,    <<"my-queue">>},
+                {durable, true}]),
+    defs_vhost(Config, exchanges, "/exchanges/<vhost>/my-exchange", put,
+               [{name, <<"my-exchange">>},
+                {type, <<"direct">>}]),
+    defs_vhost(Config, bindings, "/bindings/<vhost>/e/amq.direct/e/amq.fanout", post,
+               [{routing_key, <<"routing">>}, {arguments, []}]),
+    defs_vhost(Config, policies, "/policies/<vhost>/my-policy", put,
+               [{vhost,      vhost},
+                {name,       <<"my-policy">>},
+                {pattern,    <<".*">>},
+                {definition, [{testpos, [1, 2, 3]}]},
+                {priority,   1}]),
+
+    %% Importing into a vhost that does not exist is a client error.
+    Upload =
+        [{queues,      []},
+         {exchanges,   []},
+         {policies,    []},
+         {bindings,    []}],
+    http_post(Config, "/definitions/othervhost", Upload, ?BAD_REQUEST),
+
+    unregister_parameters_and_policy_validator(Config),
+    passed.
+
+%% Hashing-algorithm inference on definitions import: a 3.5.x export
+%% (no hashing_algorithm) is imported as MD5; a 3.6.0 export defaults to
+%% SHA-256; and when no algorithm is given on 3.6.1+, the broker's
+%% configured password_hashing_module (set here to SHA-512 via RPC) wins.
+definitions_password_test(Config) ->
+    %% Import definitions from 3.5.x
+    Config35 = [{rabbit_version, <<"3.5.4">>},
+                {users, [[{name,          <<"myuser">>},
+                          {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
+                          {tags,          <<"management">>}]
+                        ]}],
+    Expected35 = [{name,          <<"myuser">>},
+                  {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
+                  {hashing_algorithm, <<"rabbit_password_hashing_md5">>},
+                  {tags,          <<"management">>}],
+    http_post(Config, "/definitions", Config35, ?CREATED),
+    Definitions35 = http_get(Config, "/definitions", ?OK),
+    Users35 = pget(users, Definitions35),
+    true = lists:any(fun(I) -> test_item(Expected35, I) end, Users35),
+
+    %% Import definitions from 3.6.0
+    Config36 = [{rabbit_version, <<"3.6.0">>},
+                {users, [[{name,          <<"myuser">>},
+                          {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
+                          {tags,          <<"management">>}]
+                        ]}],
+    Expected36 = [{name,          <<"myuser">>},
+                  {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
+                  {hashing_algorithm, <<"rabbit_password_hashing_sha256">>},
+                  {tags,          <<"management">>}],
+    http_post(Config, "/definitions", Config36, ?CREATED),
+
+    Definitions36 = http_get(Config, "/definitions", ?OK),
+    Users36 = pget(users, Definitions36),
+    true = lists:any(fun(I) -> test_item(Expected36, I) end, Users36),
+
+    %% No hashing_algorithm provided
+    ConfigDefault = [{rabbit_version, <<"3.6.1">>},
+                     {users, [[{name,          <<"myuser">>},
+                               {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
+                               {tags,          <<"management">>}]
+                             ]}],
+    %% Point the broker's default hashing module at SHA-512 so we can
+    %% observe it being used for the import below.
+    rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbit,
+                                                                   password_hashing_module,
+                                                                   rabbit_password_hashing_sha512]),
+
+    ExpectedDefault = [{name,          <<"myuser">>},
+                       {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
+                       {hashing_algorithm, <<"rabbit_password_hashing_sha512">>},
+                       {tags,          <<"management">>}],
+    http_post(Config, "/definitions", ConfigDefault, ?CREATED),
+
+    DefinitionsDefault = http_get(Config, "/definitions", ?OK),
+    UsersDefault = pget(users, DefinitionsDefault),
+
+    true = lists:any(fun(I) -> test_item(ExpectedDefault, I) end, UsersDefault),
+    passed.
+
+%% Exclusive (connection-scoped) entities must be omitted from a definitions
+%% export: declare an exclusive queue, confirm it is visible via /queues, then
+%% check the /definitions document contains no queues, exchanges or bindings.
+definitions_remove_things_test(Config) ->
+    {Conn, Ch} = open_connection_and_channel(Config),
+    amqp_channel:call(Ch, #'queue.declare'{ queue = <<"my-exclusive">>,
+                                            exclusive = true }),
+    %% The queue itself is reachable over the HTTP API...
+    http_get(Config, "/queues/%2f/my-exclusive", ?OK),
+    Definitions = http_get(Config, "/definitions", ?OK),
+    %% ...but nothing exclusive leaks into the export.
+    [] = pget(queues, Definitions),
+    [] = pget(exchanges, Definitions),
+    [] = pget(bindings, Definitions),
+    amqp_channel:close(Ch),
+    close_connection(Conn),
+    passed.
+
+%% Round-trip a server-named queue through a definitions export/import:
+%% export while the queue exists, delete it, re-import, and verify the
+%% queue reappears under the same (server-generated) name.
+definitions_server_named_queue_test(Config) ->
+    {Conn, Ch} = open_connection_and_channel(Config),
+    #'queue.declare_ok'{ queue = QName } =
+        amqp_channel:call(Ch, #'queue.declare'{}),
+    close_channel(Ch),
+    close_connection(Conn),
+    %% Server-generated names (e.g. "amq.gen-...") need URL quoting.
+    Path = "/queues/%2f/" ++ mochiweb_util:quote_plus(QName),
+    http_get(Config, Path, ?OK),
+    Definitions = http_get(Config, "/definitions", ?OK),
+    http_delete(Config, Path, ?NO_CONTENT),
+    http_get(Config, Path, ?NOT_FOUND),
+    %% Re-importing the export must recreate the queue.
+    http_post(Config, "/definitions", Definitions, [?CREATED, ?NO_CONTENT]),
+    http_get(Config, Path, ?OK),
+    http_delete(Config, Path, ?NO_CONTENT),
+    passed.
+
+%% /aliveness-test/<vhost> reports {status, ok} for an existing vhost and 404
+%% for an unknown one. The check leaves an "aliveness-test" queue behind in
+%% the default vhost, which we delete to keep the broker clean.
+aliveness_test(Config) ->
+    [{status, <<"ok">>}] = http_get(Config, "/aliveness-test/%2f", ?OK),
+    http_get(Config, "/aliveness-test/foo", ?NOT_FOUND),
+    http_delete(Config, "/queues/%2f/aliveness-test", ?NO_CONTENT),
+    passed.
+
+%% /healthchecks/node returns {status, ok} for the local node; a path with an
+%% unknown trailing segment is a 404.
+healthchecks_test(Config) ->
+    [{status, <<"ok">>}] = http_get(Config, "/healthchecks/node", ?OK),
+    http_get(Config, "/healthchecks/node/foo", ?NOT_FOUND),
+    passed.
+
+%% Exchange, queue and binding arguments must survive a definitions
+%% export/import round trip: create all three with arguments, export, delete
+%% the exchange and queue, re-import, and verify the arguments came back.
+arguments_test(Config) ->
+    XArgs = [{type, <<"headers">>},
+             {arguments, [{'alternate-exchange', <<"amq.direct">>}]}],
+    QArgs = [{arguments, [{'x-expires', 1800000}]}],
+    BArgs = [{routing_key, <<"">>},
+             {arguments, [{'x-match', <<"all">>},
+                          {foo, <<"bar">>}]}],
+    http_put(Config, "/exchanges/%2f/myexchange", XArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_post(Config, "/bindings/%2f/e/myexchange/q/myqueue", BArgs, [?CREATED, ?NO_CONTENT]),
+    Definitions = http_get(Config, "/definitions", ?OK),
+    http_delete(Config, "/exchanges/%2f/myexchange", ?NO_CONTENT),
+    http_delete(Config, "/queues/%2f/myqueue", ?NO_CONTENT),
+    http_post(Config, "/definitions", Definitions, ?CREATED),
+    [{'alternate-exchange', <<"amq.direct">>}] =
+        pget(arguments, http_get(Config, "/exchanges/%2f/myexchange", ?OK)),
+    [{'x-expires', 1800000}] =
+        pget(arguments, http_get(Config, "/queues/%2f/myqueue", ?OK)),
+    %% NOTE(review): the trailing path segment looks like the server-generated
+    %% "props" key derived from the binding arguments — confirm if it changes.
+    true = lists:sort([{'x-match', <<"all">>}, {foo, <<"bar">>}]) =:=
+       lists:sort(pget(arguments,
+                       http_get(Config, "/bindings/%2f/e/myexchange/q/myqueue/" ++
+                                    "~nXOkVwqZzUOdS9_HcBWheg", ?OK))),
+    http_delete(Config, "/exchanges/%2f/myexchange", ?NO_CONTENT),
+    http_delete(Config, "/queues/%2f/myqueue", ?NO_CONTENT),
+    passed.
+
+%% Table/array-valued exchange arguments (here an 'upstreams' list of URIs)
+%% must round-trip unchanged through a definitions export/import.
+arguments_table_test(Config) ->
+    Args = [{'upstreams', [<<"amqp://localhost/%2f/upstream1">>,
+                           <<"amqp://localhost/%2f/upstream2">>]}],
+    XArgs = [{type, <<"headers">>},
+             {arguments, Args}],
+    http_put(Config, "/exchanges/%2f/myexchange", XArgs, [?CREATED, ?NO_CONTENT]),
+    Definitions = http_get(Config, "/definitions", ?OK),
+    http_delete(Config, "/exchanges/%2f/myexchange", ?NO_CONTENT),
+    http_post(Config, "/definitions", Definitions, ?CREATED),
+    %% Pattern-matching against the bound Args asserts byte-for-byte equality.
+    Args = pget(arguments, http_get(Config, "/exchanges/%2f/myexchange", ?OK)),
+    http_delete(Config, "/exchanges/%2f/myexchange", ?NO_CONTENT),
+    passed.
+
+%% DELETE /queues/<vh>/<q>/contents purges a queue: after publishing two
+%% messages and consuming one, a purge leaves the queue empty. Purging an
+%% unknown queue is 404; purging or deleting an exclusive queue is 400.
+queue_purge_test(Config) ->
+    QArgs = [],
+    http_put(Config, "/queues/%2f/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
+    {Conn, Ch} = open_connection_and_channel(Config),
+    Publish = fun() ->
+                      amqp_channel:call(
+                        Ch, #'basic.publish'{exchange = <<"">>,
+                                             routing_key = <<"myqueue">>},
+                        #amqp_msg{payload = <<"message">>})
+              end,
+    Publish(),
+    Publish(),
+    amqp_channel:call(
+      Ch, #'queue.declare'{queue = <<"exclusive">>, exclusive = true}),
+    %% Consume one of the two messages so the purge removes the remainder.
+    {#'basic.get_ok'{}, _} =
+        amqp_channel:call(Ch, #'basic.get'{queue = <<"myqueue">>}),
+    http_delete(Config, "/queues/%2f/myqueue/contents", ?NO_CONTENT),
+    http_delete(Config, "/queues/%2f/badqueue/contents", ?NOT_FOUND),
+    http_delete(Config, "/queues/%2f/exclusive/contents", ?BAD_REQUEST),
+    http_delete(Config, "/queues/%2f/exclusive", ?BAD_REQUEST),
+    %% The queue must now be empty.
+    #'basic.get_empty'{} =
+        amqp_channel:call(Ch, #'basic.get'{queue = <<"myqueue">>}),
+    close_channel(Ch),
+    close_connection(Conn),
+    http_delete(Config, "/queues/%2f/myqueue", ?NO_CONTENT),
+    passed.
+
+%% POST /queues/<vh>/<q>/actions accepts the known actions "sync" and
+%% "cancel_sync" (204) and rejects an unknown action with 400.
+queue_actions_test(Config) ->
+    http_put(Config, "/queues/%2f/q", [], [?CREATED, ?NO_CONTENT]),
+    http_post(Config, "/queues/%2f/q/actions", [{action, sync}], ?NO_CONTENT),
+    http_post(Config, "/queues/%2f/q/actions", [{action, cancel_sync}], ?NO_CONTENT),
+    http_post(Config, "/queues/%2f/q/actions", [{action, change_colour}], ?BAD_REQUEST),
+    http_delete(Config, "/queues/%2f/q", ?NO_CONTENT),
+    passed.
+
+%% Smoke test: listing queues while an exclusive consumer is attached must not
+%% crash the stats/HTTP layer (no assertions on the body, just a clean GET).
+exclusive_consumer_test(Config) ->
+    {Conn, Ch} = open_connection_and_channel(Config),
+    #'queue.declare_ok'{ queue = QName } =
+        amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue     = QName,
+                                                exclusive = true}, self()),
+    timer:sleep(1000), %% Sadly we need to sleep to let the stats update
+    http_get(Config, "/queues/%2f/"), %% Just check we don't blow up
+    close_channel(Ch),
+    close_connection(Conn),
+    passed.
+
+
+%% An exclusive, server-named queue must be reported by the HTTP API with
+%% exclusive=true and the expected default flags (non-durable, no auto-delete,
+%% no arguments).
+exclusive_queue_test(Config) ->
+    {Conn, Ch} = open_connection_and_channel(Config),
+    #'queue.declare_ok'{ queue = QName } =
+       amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
+    timer:sleep(1000), %% Sadly we need to sleep to let the stats update
+    Path = "/queues/%2f/" ++ mochiweb_util:quote_plus(QName),
+    Queue = http_get(Config, Path),
+    assert_item([{name,         QName},
+                {vhost,       <<"/">>},
+                {durable,     false},
+                {auto_delete, false},
+                {exclusive,   true},
+                {arguments,   []}], Queue),
+    amqp_channel:close(Ch),
+    close_connection(Conn),
+    passed.
+
+%% Pagination over /connections and /channels: with 3 connections (each with
+%% one channel), page_size=2 yields two pages — page 1 holds 2 items, page 2
+%% holds the remaining 1 — with consistent total/filtered/page counts.
+connections_channels_pagination_test(Config) ->
+    %% this test uses "unmanaged" (by Common Test helpers) connections to avoid
+    %% connection caching
+    Conn      = open_unmanaged_connection(Config),
+    {ok, Ch}  = amqp_connection:open_channel(Conn),
+    Conn1     = open_unmanaged_connection(Config),
+    {ok, Ch1} = amqp_connection:open_channel(Conn1),
+    Conn2     = open_unmanaged_connection(Config),
+    {ok, Ch2} = amqp_connection:open_channel(Conn2),
+
+    timer:sleep(1000), %% Sadly we need to sleep to let the stats update
+    PageOfTwo = http_get(Config, "/connections?page=1&page_size=2", ?OK),
+    ?assertEqual(3, proplists:get_value(total_count, PageOfTwo)),
+    ?assertEqual(3, proplists:get_value(filtered_count, PageOfTwo)),
+    ?assertEqual(2, proplists:get_value(item_count, PageOfTwo)),
+    ?assertEqual(1, proplists:get_value(page, PageOfTwo)),
+    ?assertEqual(2, proplists:get_value(page_size, PageOfTwo)),
+    ?assertEqual(2, proplists:get_value(page_count, PageOfTwo)),
+
+
+    %% Second page of channels: only the one remaining item.
+    TwoOfTwo = http_get(Config, "/channels?page=2&page_size=2", ?OK),
+    ?assertEqual(3, proplists:get_value(total_count, TwoOfTwo)),
+    ?assertEqual(3, proplists:get_value(filtered_count, TwoOfTwo)),
+    ?assertEqual(1, proplists:get_value(item_count, TwoOfTwo)),
+    ?assertEqual(2, proplists:get_value(page, TwoOfTwo)),
+    ?assertEqual(2, proplists:get_value(page_size, TwoOfTwo)),
+    ?assertEqual(2, proplists:get_value(page_count, TwoOfTwo)),
+
+    amqp_channel:close(Ch),
+    amqp_connection:close(Conn),
+    amqp_channel:close(Ch1),
+    amqp_connection:close(Conn1),
+    amqp_channel:close(Ch2),
+    amqp_connection:close(Conn2),
+
+    passed.
+
+%% Pagination, name filtering and regex filtering over /exchanges, plus
+%% rejection of malformed paging parameters. Counts (19 total) include the
+%% broker's built-in exchanges across both vhosts as well as the four created
+%% here — NOTE(review): 19 assumes the default built-in exchange set; a
+%% broker with extra plugins/exchanges would change it.
+exchanges_pagination_test(Config) ->
+    QArgs = [],
+    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+    http_put(Config, "/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
+    http_get(Config, "/exchanges/vh1?page=1&page_size=2", ?OK),
+    http_put(Config, "/exchanges/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/exchanges/vh1/test1", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/exchanges/%2f/test2_reg", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/exchanges/vh1/reg_test3", QArgs, [?CREATED, ?NO_CONTENT]),
+    PageOfTwo = http_get(Config, "/exchanges?page=1&page_size=2", ?OK),
+    ?assertEqual(19, proplists:get_value(total_count, PageOfTwo)),
+    ?assertEqual(19, proplists:get_value(filtered_count, PageOfTwo)),
+    ?assertEqual(2, proplists:get_value(item_count, PageOfTwo)),
+    ?assertEqual(1, proplists:get_value(page, PageOfTwo)),
+    ?assertEqual(2, proplists:get_value(page_size, PageOfTwo)),
+    ?assertEqual(10, proplists:get_value(page_count, PageOfTwo)),
+    assert_list([[{name, <<"">>}, {vhost, <<"/">>}],
+                [{name, <<"amq.direct">>}, {vhost, <<"/">>}]
+               ], proplists:get_value(items, PageOfTwo)),
+
+    %% Substring filter: "reg" matches test2_reg and reg_test3 only.
+    ByName = http_get(Config, "/exchanges?page=1&page_size=2&name=reg", ?OK),
+    ?assertEqual(19, proplists:get_value(total_count, ByName)),
+    ?assertEqual(2, proplists:get_value(filtered_count, ByName)),
+    ?assertEqual(2, proplists:get_value(item_count, ByName)),
+    ?assertEqual(1, proplists:get_value(page, ByName)),
+    ?assertEqual(2, proplists:get_value(page_size, ByName)),
+    ?assertEqual(1, proplists:get_value(page_count, ByName)),
+    assert_list([[{name, <<"test2_reg">>}, {vhost, <<"/">>}],
+                [{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
+               ], proplists:get_value(items, ByName)),
+
+
+    %% Anchored regex filter: only names starting with "reg" match.
+    RegExByName = http_get(Config,
+                           "/exchanges?page=1&page_size=2&name=^(?=^reg)&use_regex=true",
+                           ?OK),
+    ?assertEqual(19, proplists:get_value(total_count, RegExByName)),
+    ?assertEqual(1, proplists:get_value(filtered_count, RegExByName)),
+    ?assertEqual(1, proplists:get_value(item_count, RegExByName)),
+    ?assertEqual(1, proplists:get_value(page, RegExByName)),
+    ?assertEqual(2, proplists:get_value(page_size, RegExByName)),
+    ?assertEqual(1, proplists:get_value(page_count, RegExByName)),
+    assert_list([[{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
+               ], proplists:get_value(items, RegExByName)),
+
+
+    %% Malformed paging parameters must all be rejected with 400.
+    http_get(Config, "/exchanges?page=1000", ?BAD_REQUEST),
+    http_get(Config, "/exchanges?page=-1", ?BAD_REQUEST),
+    http_get(Config, "/exchanges?page=not_an_integer_value", ?BAD_REQUEST),
+    http_get(Config, "/exchanges?page=1&page_size=not_an_intger_value", ?BAD_REQUEST),
+    http_get(Config, "/exchanges?page=1&page_size=501", ?BAD_REQUEST), %% max 500 allowed
+    http_get(Config, "/exchanges?page=-1&page_size=-2", ?BAD_REQUEST),
+    http_delete(Config, "/exchanges/%2f/test0", ?NO_CONTENT),
+    http_delete(Config, "/exchanges/vh1/test1", ?NO_CONTENT),
+    http_delete(Config, "/exchanges/%2f/test2_reg", ?NO_CONTENT),
+    http_delete(Config, "/exchanges/vh1/reg_test3", ?NO_CONTENT),
+    http_delete(Config, "/vhosts/vh1", ?NO_CONTENT),
+    passed.
+
+%% Pagination of /exchanges as a non-default user: "admin" has permissions
+%% only on vh1, so a name-filtered listing returns just vh1's "test1", not
+%% the "test0" created in the default vhost.
+exchanges_pagination_permissions_test(Config) ->
+    http_put(Config, "/users/admin",   [{password, <<"admin">>},
+                                        {tags, <<"administrator">>}], [?CREATED, ?NO_CONTENT]),
+    Perms = [{configure, <<".*">>},
+            {write,     <<".*">>},
+            {read,      <<".*">>}],
+    http_put(Config, "/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/vh1/admin",   Perms, [?CREATED, ?NO_CONTENT]),
+    QArgs = [],
+    http_put(Config, "/exchanges/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/exchanges/vh1/test1", QArgs, "admin","admin", [?CREATED, ?NO_CONTENT]),
+    FirstPage = http_get(Config, "/exchanges?page=1&name=test1","admin","admin", ?OK),
+    ?assertEqual(8, proplists:get_value(total_count, FirstPage)),
+    ?assertEqual(1, proplists:get_value(item_count, FirstPage)),
+    ?assertEqual(1, proplists:get_value(page, FirstPage)),
+    %% 100 is the page size used when page_size is not given.
+    ?assertEqual(100, proplists:get_value(page_size, FirstPage)),
+    ?assertEqual(1, proplists:get_value(page_count, FirstPage)),
+    assert_list([[{name, <<"test1">>}, {vhost, <<"vh1">>}]
+               ], proplists:get_value(items, FirstPage)),
+    http_delete(Config, "/exchanges/%2f/test0", ?NO_CONTENT),
+    http_delete(Config, "/exchanges/vh1/test1","admin","admin", ?NO_CONTENT),
+    http_delete(Config, "/users/admin", ?NO_CONTENT),
+    passed.
+
+
+
+%% Pagination over /queues with four queues across two vhosts: default
+%% ordering, sort by name, sort-reverse, substring and regex name filters,
+%% plus rejection of malformed paging parameters.
+queue_pagination_test(Config) ->
+    QArgs = [],
+    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+    http_put(Config, "/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
+
+    http_get(Config, "/queues/vh1?page=1&page_size=2", ?OK),
+
+    http_put(Config, "/queues/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/vh1/test1", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/test2_reg", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/vh1/reg_test3", QArgs, [?CREATED, ?NO_CONTENT]),
+    PageOfTwo = http_get(Config, "/queues?page=1&page_size=2", ?OK),
+    ?assertEqual(4, proplists:get_value(total_count, PageOfTwo)),
+    ?assertEqual(4, proplists:get_value(filtered_count, PageOfTwo)),
+    ?assertEqual(2, proplists:get_value(item_count, PageOfTwo)),
+    ?assertEqual(1, proplists:get_value(page, PageOfTwo)),
+    ?assertEqual(2, proplists:get_value(page_size, PageOfTwo)),
+    ?assertEqual(2, proplists:get_value(page_count, PageOfTwo)),
+    assert_list([[{name, <<"test0">>}, {vhost, <<"/">>}],
+                [{name, <<"test2_reg">>}, {vhost, <<"/">>}]
+               ], proplists:get_value(items, PageOfTwo)),
+
+    SortedByName = http_get(Config, "/queues?sort=name&page=1&page_size=2", ?OK),
+    ?assertEqual(4, proplists:get_value(total_count, SortedByName)),
+    ?assertEqual(4, proplists:get_value(filtered_count, SortedByName)),
+    ?assertEqual(2, proplists:get_value(item_count, SortedByName)),
+    ?assertEqual(1, proplists:get_value(page, SortedByName)),
+    ?assertEqual(2, proplists:get_value(page_size, SortedByName)),
+    ?assertEqual(2, proplists:get_value(page_count, SortedByName)),
+    assert_list([[{name, <<"reg_test3">>}, {vhost, <<"vh1">>}],
+                [{name, <<"test0">>}, {vhost, <<"/">>}]
+               ], proplists:get_value(items, SortedByName)),
+
+
+    %% No page_size: everything fits on one page of the default size (100).
+    FirstPage = http_get(Config, "/queues?page=1", ?OK),
+    ?assertEqual(4, proplists:get_value(total_count, FirstPage)),
+    ?assertEqual(4, proplists:get_value(filtered_count, FirstPage)),
+    ?assertEqual(4, proplists:get_value(item_count, FirstPage)),
+    ?assertEqual(1, proplists:get_value(page, FirstPage)),
+    ?assertEqual(100, proplists:get_value(page_size, FirstPage)),
+    ?assertEqual(1, proplists:get_value(page_count, FirstPage)),
+    assert_list([[{name, <<"test0">>}, {vhost, <<"/">>}],
+                [{name, <<"test1">>}, {vhost, <<"vh1">>}],
+                [{name, <<"test2_reg">>}, {vhost, <<"/">>}],
+                [{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
+               ], proplists:get_value(items, FirstPage)),
+
+
+    ReverseSortedByName = http_get(Config,
+                                   "/queues?page=2&page_size=2&sort=name&sort_reverse=true",
+                                   ?OK),
+    ?assertEqual(4, proplists:get_value(total_count, ReverseSortedByName)),
+    ?assertEqual(4, proplists:get_value(filtered_count, ReverseSortedByName)),
+    ?assertEqual(2, proplists:get_value(item_count, ReverseSortedByName)),
+    ?assertEqual(2, proplists:get_value(page, ReverseSortedByName)),
+    ?assertEqual(2, proplists:get_value(page_size, ReverseSortedByName)),
+    ?assertEqual(2, proplists:get_value(page_count, ReverseSortedByName)),
+    assert_list([[{name, <<"test0">>}, {vhost, <<"/">>}],
+                [{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
+               ], proplists:get_value(items, ReverseSortedByName)),
+
+
+    %% Substring filter: "reg" matches test2_reg and reg_test3 only.
+    ByName = http_get(Config, "/queues?page=1&page_size=2&name=reg", ?OK),
+    ?assertEqual(4, proplists:get_value(total_count, ByName)),
+    ?assertEqual(2, proplists:get_value(filtered_count, ByName)),
+    ?assertEqual(2, proplists:get_value(item_count, ByName)),
+    ?assertEqual(1, proplists:get_value(page, ByName)),
+    ?assertEqual(2, proplists:get_value(page_size, ByName)),
+    ?assertEqual(1, proplists:get_value(page_count, ByName)),
+    assert_list([[{name, <<"test2_reg">>}, {vhost, <<"/">>}],
+                [{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
+               ], proplists:get_value(items, ByName)),
+
+    %% Anchored regex filter: only names starting with "reg" match.
+    RegExByName = http_get(Config,
+                           "/queues?page=1&page_size=2&name=^(?=^reg)&use_regex=true",
+                           ?OK),
+    ?assertEqual(4, proplists:get_value(total_count, RegExByName)),
+    ?assertEqual(1, proplists:get_value(filtered_count, RegExByName)),
+    ?assertEqual(1, proplists:get_value(item_count, RegExByName)),
+    ?assertEqual(1, proplists:get_value(page, RegExByName)),
+    ?assertEqual(2, proplists:get_value(page_size, RegExByName)),
+    ?assertEqual(1, proplists:get_value(page_count, RegExByName)),
+    assert_list([[{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
+               ], proplists:get_value(items, RegExByName)),
+
+
+    %% Malformed paging parameters must all be rejected with 400.
+    http_get(Config, "/queues?page=1000", ?BAD_REQUEST),
+    http_get(Config, "/queues?page=-1", ?BAD_REQUEST),
+    http_get(Config, "/queues?page=not_an_integer_value", ?BAD_REQUEST),
+    http_get(Config, "/queues?page=1&page_size=not_an_intger_value", ?BAD_REQUEST),
+    http_get(Config, "/queues?page=1&page_size=501", ?BAD_REQUEST), %% max 500 allowed
+    http_get(Config, "/queues?page=-1&page_size=-2", ?BAD_REQUEST),
+    http_delete(Config, "/queues/%2f/test0", ?NO_CONTENT),
+    http_delete(Config, "/queues/vh1/test1", ?NO_CONTENT),
+    http_delete(Config, "/queues/%2f/test2_reg", ?NO_CONTENT),
+    http_delete(Config, "/queues/vh1/reg_test3", ?NO_CONTENT),
+    http_delete(Config, "/vhosts/vh1", ?NO_CONTENT),
+    passed.
+
+%% Pagination of /queues as a non-default user: "admin" has permissions only
+%% on vh1, so the listing contains just vh1's "test1" (total_count 1), not
+%% the "test0" created in the default vhost.
+queues_pagination_permissions_test(Config) ->
+    http_put(Config, "/users/admin",   [{password, <<"admin">>},
+                                        {tags, <<"administrator">>}], [?CREATED, ?NO_CONTENT]),
+    Perms = [{configure, <<".*">>},
+            {write,     <<".*">>},
+            {read,      <<".*">>}],
+    http_put(Config, "/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/vh1/admin",   Perms, [?CREATED, ?NO_CONTENT]),
+    QArgs = [],
+    http_put(Config, "/queues/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/vh1/test1", QArgs, "admin","admin", [?CREATED, ?NO_CONTENT]),
+    FirstPage = http_get(Config, "/queues?page=1","admin","admin", ?OK),
+    ?assertEqual(1, proplists:get_value(total_count, FirstPage)),
+    ?assertEqual(1, proplists:get_value(item_count, FirstPage)),
+    ?assertEqual(1, proplists:get_value(page, FirstPage)),
+    %% 100 is the page size used when page_size is not given.
+    ?assertEqual(100, proplists:get_value(page_size, FirstPage)),
+    ?assertEqual(1, proplists:get_value(page_count, FirstPage)),
+    assert_list([[{name, <<"test1">>}, {vhost, <<"vh1">>}]
+               ], proplists:get_value(items, FirstPage)),
+    http_delete(Config, "/queues/%2f/test0", ?NO_CONTENT),
+    http_delete(Config, "/queues/vh1/test1","admin","admin", ?NO_CONTENT),
+    http_delete(Config, "/users/admin", ?NO_CONTENT),
+    passed.
+
+%% Every stats-reporting endpoint accepts lengths_age/lengths_incr sample
+%% range parameters: an age of 60 is accepted (200) while an age of 6000 is
+%% rejected (400) — presumably beyond the configured sample retention window;
+%% confirm against the broker's sample_retention_policies. Covers channels,
+%% connections, exchanges, nodes, overview, queues and vhosts.
+samples_range_test(Config) ->
+    {Conn, Ch} = open_connection_and_channel(Config),
+
+    %% Channels.
+
+    [ConnInfo | _] = http_get(Config, "/channels?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    %% Derive the connection and channel resource names from the listing;
+    %% channel names are "<connection name> (<channel number>)".
+    {_, ConnDetails} = lists:keyfind(connection_details, 1, ConnInfo),
+    {_, ConnName0} = lists:keyfind(name, 1, ConnDetails),
+    ConnName = http_uri:encode(binary_to_list(ConnName0)),
+    ChanName = ConnName ++ http_uri:encode(" (1)"),
+
+    http_get(Config, "/channels/" ++ ChanName ++ "?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/channels/" ++ ChanName ++ "?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    http_get(Config, "/vhosts/%2f/channels?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/vhosts/%2f/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    %% Connections.
+
+    http_get(Config, "/connections?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/connections?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    http_get(Config, "/connections/" ++ ConnName ++ "?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/connections/" ++ ConnName ++ "?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    http_get(Config, "/connections/" ++ ConnName ++ "/channels?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/connections/" ++ ConnName ++ "/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    http_get(Config, "/vhosts/%2f/connections?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/vhosts/%2f/connections?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    amqp_channel:close(Ch),
+    amqp_connection:close(Conn),
+
+    %% Exchanges.
+
+    http_get(Config, "/exchanges?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/exchanges?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    http_get(Config, "/exchanges/%2f/amq.direct?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/exchanges/%2f/amq.direct?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    %% Nodes.
+
+    http_get(Config, "/nodes?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/nodes?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    %% Overview.
+
+    http_get(Config, "/overview?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/overview?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    %% Queues.
+
+    http_put(Config, "/queues/%2f/test0", [], [?CREATED, ?NO_CONTENT]),
+
+    http_get(Config, "/queues/%2f?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/queues/%2f?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+    http_get(Config, "/queues/%2f/test0?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/queues/%2f/test0?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    http_delete(Config, "/queues/%2f/test0", ?NO_CONTENT),
+
+    %% Vhosts.
+
+    http_put(Config, "/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
+
+    http_get(Config, "/vhosts?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/vhosts?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+    http_get(Config, "/vhosts/vh1?lengths_age=60&lengths_incr=1", ?OK),
+    http_get(Config, "/vhosts/vh1?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
+
+    http_delete(Config, "/vhosts/vh1", ?NO_CONTENT),
+
+    passed.
+
+%% Sorting of /queues listings: default order, sort=name, sort=vhost, and the
+%% sort_reverse variants, with four queues spread over two vhosts. Also checks
+%% that a dotted sort key on a nested column does not crash.
+sorting_test(Config) ->
+    QArgs = [],
+    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+    http_put(Config, "/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/vh1/test1", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/test2", QArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/vh1/test3", QArgs, [?CREATED, ?NO_CONTENT]),
+    %% Default order groups by vhost ("/" before "vh1").
+    assert_list([[{name, <<"test0">>}],
+                 [{name, <<"test2">>}],
+                 [{name, <<"test1">>}],
+                 [{name, <<"test3">>}]], http_get(Config, "/queues", ?OK)),
+    assert_list([[{name, <<"test0">>}],
+                 [{name, <<"test1">>}],
+                 [{name, <<"test2">>}],
+                 [{name, <<"test3">>}]], http_get(Config, "/queues?sort=name", ?OK)),
+    assert_list([[{name, <<"test0">>}],
+                 [{name, <<"test2">>}],
+                 [{name, <<"test1">>}],
+                 [{name, <<"test3">>}]], http_get(Config, "/queues?sort=vhost", ?OK)),
+    assert_list([[{name, <<"test3">>}],
+                 [{name, <<"test1">>}],
+                 [{name, <<"test2">>}],
+                 [{name, <<"test0">>}]], http_get(Config, "/queues?sort_reverse=true", ?OK)),
+    assert_list([[{name, <<"test3">>}],
+                 [{name, <<"test2">>}],
+                 [{name, <<"test1">>}],
+                 [{name, <<"test0">>}]], http_get(Config, "/queues?sort=name&sort_reverse=true", ?OK)),
+    assert_list([[{name, <<"test3">>}],
+                 [{name, <<"test1">>}],
+                 [{name, <<"test2">>}],
+                 [{name, <<"test0">>}]], http_get(Config, "/queues?sort=vhost&sort_reverse=true", ?OK)),
+    %% Rather poor but at least test it doesn't blow up with dots
+    http_get(Config, "/queues?sort=owner_pid_details.name", ?OK),
+    http_delete(Config, "/queues/%2f/test0", ?NO_CONTENT),
+    http_delete(Config, "/queues/vh1/test1", ?NO_CONTENT),
+    http_delete(Config, "/queues/%2f/test2", ?NO_CONTENT),
+    http_delete(Config, "/queues/vh1/test3", ?NO_CONTENT),
+    http_delete(Config, "/vhosts/vh1", ?NO_CONTENT),
+    passed.
+
+%% Output formatting: stats fields that have no value yet for a fresh queue
+%% (consumer_utilisation, exclusive_consumer_tag, recoverable_slaves) must be
+%% present in the listing and rendered as null rather than omitted.
+format_output_test(Config) ->
+    QArgs = [],
+    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
+    http_put(Config, "/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
+    assert_list([[{name, <<"test0">>},
+                 {consumer_utilisation, null},
+                 {exclusive_consumer_tag, null},
+                 {recoverable_slaves, null}]], http_get(Config, "/queues", ?OK)),
+    http_delete(Config, "/queues/%2f/test0", ?NO_CONTENT),
+    http_delete(Config, "/vhosts/vh1", ?NO_CONTENT),
+    passed.
+
+%% The ?columns= selector must restrict both list and single-item responses
+%% to the requested fields, including a dotted path into a nested structure
+%% (arguments.foo).
+columns_test(Config) ->
+    http_put(Config, "/queues/%2f/test", [{arguments, [{<<"foo">>, <<"bar">>}]}],
+             [?CREATED, ?NO_CONTENT]),
+    [List] = http_get(Config, "/queues?columns=arguments.foo,name", ?OK),
+    [{arguments, [{foo, <<"bar">>}]}, {name, <<"test">>}] = lists:sort(List),
+    [{arguments, [{foo, <<"bar">>}]}, {name, <<"test">>}] =
+        lists:sort(http_get(Config, "/queues/%2f/test?columns=arguments.foo,name", ?OK)),
+    http_delete(Config, "/queues/%2f/test", ?NO_CONTENT),
+    passed.
+
+%% POST /queues/<vh>/<q>/get: truncate=1 cuts the payload to one byte,
+%% nested array/table headers survive the round trip, requeue=true leaves
+%% messages on the queue for a later get, and a drained queue returns [].
+get_test(Config) ->
+    %% Real world example...
+    Headers = [{<<"x-forwarding">>, array,
+                [{table,
+                  [{<<"uri">>, longstr,
+                    <<"amqp://localhost/%2f/upstream">>}]}]}],
+    http_put(Config, "/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
+    {Conn, Ch} = open_connection_and_channel(Config),
+    Publish = fun (Payload) ->
+                      amqp_channel:cast(
+                        Ch, #'basic.publish'{exchange = <<>>,
+                                             routing_key = <<"myqueue">>},
+                        #amqp_msg{props = #'P_basic'{headers = Headers},
+                                  payload = Payload})
+              end,
+    Publish(<<"1aaa">>),
+    Publish(<<"2aaa">>),
+    Publish(<<"3aaa">>),
+    amqp_channel:close(Ch),
+    close_connection(Conn),
+    [Msg] = http_post(Config, "/queues/%2f/myqueue/get", [{requeue,  false},
+                                                          {count,    1},
+                                                          {encoding, auto},
+                                                          {truncate, 1}], ?OK),
+    false         = pget(redelivered, Msg),
+    <<>>          = pget(exchange,    Msg),
+    <<"myqueue">> = pget(routing_key, Msg),
+    %% truncate=1 leaves only the first byte of <<"1aaa">>.
+    <<"1">>       = pget(payload,     Msg),
+    [{'x-forwarding',
+      [[{uri,<<"amqp://localhost/%2f/upstream">>}]]}] =
+        pget(headers, pget(properties, Msg)),
+
+    %% requeue=true: the two remaining messages stay on the queue...
+    [M2, M3] = http_post(Config, "/queues/%2f/myqueue/get", [{requeue,  true},
+                                                             {count,    5},
+                                                             {encoding, auto}], ?OK),
+    <<"2aaa">> = pget(payload, M2),
+    <<"3aaa">> = pget(payload, M3),
+    %% ...so a requeue=false get still sees both, and then the queue is empty.
+    2 = length(http_post(Config, "/queues/%2f/myqueue/get", [{requeue,  false},
+                                                             {count,    5},
+                                                             {encoding, auto}], ?OK)),
+    [] = http_post(Config, "/queues/%2f/myqueue/get", [{requeue,  false},
+                                                       {count,    5},
+                                                       {encoding, auto}], ?OK),
+    http_delete(Config, "/queues/%2f/myqueue", ?NO_CONTENT),
+    passed.
+
+%% Unauthorised "get": a user carrying only the "management" tag must be
+%% refused when fetching messages over the HTTP API (401/not-authorised).
+get_fail_test(Config) ->
+    %% Accept 201 or 204 like every other user-creation PUT in this suite —
+    %% a first-time PUT of a user returns ?CREATED, not ?NO_CONTENT.
+    http_put(Config, "/users/myuser", [{password, <<"password">>},
+                                       {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
+    http_post(Config, "/queues/%2f/myqueue/get",
+              [{requeue,  false},
+               {count,    1},
+               {encoding, auto}], "myuser", "password", ?NOT_AUTHORISED),
+    http_delete(Config, "/queues/%2f/myqueue", ?NO_CONTENT),
+    http_delete(Config, "/users/myuser", ?NO_CONTENT),
+    passed.
+
+%% POST /exchanges/<vh>/amq.default/publish routes to the named queue
+%% ({routed, true}) and the message read back via /get matches what was
+%% published; re-publishing the fetched message round-trips identically.
+publish_test(Config) ->
+    Headers = [{'x-forwarding', [[{uri,<<"amqp://localhost/%2f/upstream">>}]]}],
+    Msg = msg(<<"myqueue">>, Headers, <<"Hello world">>),
+    http_put(Config, "/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
+    ?assertEqual([{routed, true}],
+                 http_post(Config, "/exchanges/%2f/amq.default/publish", Msg, ?OK)),
+    [Msg2] = http_post(Config, "/queues/%2f/myqueue/get", [{requeue,  false},
+                                                           {count,    1},
+                                                           {encoding, auto}], ?OK),
+    assert_item(Msg, Msg2),
+    %% The fetched message can be published back verbatim.
+    http_post(Config, "/exchanges/%2f/amq.default/publish", Msg2, ?OK),
+    [Msg3] = http_post(Config, "/queues/%2f/myqueue/get", [{requeue,  false},
+                                                           {count,    1},
+                                                           {encoding, auto}], ?OK),
+    assert_item(Msg, Msg3),
+    http_delete(Config, "/queues/%2f/myqueue", ?NO_CONTENT),
+    passed.
+
+%% Same publish/get round trip as publish_test, but issuing the requests with
+%% an "Accept: application/json" header via http_post_accept_json.
+publish_accept_json_test(Config) ->
+    Headers = [{'x-forwarding', [[{uri, <<"amqp://localhost/%2f/upstream">>}]]}],
+    Msg = msg(<<"myqueue">>, Headers, <<"Hello world">>),
+    http_put(Config, "/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
+    ?assertEqual([{routed, true}],
+                http_post_accept_json(Config, "/exchanges/%2f/amq.default/publish",
+                                      Msg, ?OK)),
+
+    [Msg2] = http_post_accept_json(Config, "/queues/%2f/myqueue/get",
+                                  [{requeue, false},
+                                   {count, 1},
+                                   {encoding, auto}], ?OK),
+    assert_item(Msg, Msg2),
+    %% The fetched message can be published back verbatim.
+    http_post_accept_json(Config, "/exchanges/%2f/amq.default/publish", Msg2, ?OK),
+    [Msg3] = http_post_accept_json(Config, "/queues/%2f/myqueue/get",
+                                  [{requeue, false},
+                                   {count, 1},
+                                   {encoding, auto}], ?OK),
+    assert_item(Msg, Msg3),
+    http_delete(Config, "/queues/%2f/myqueue", ?NO_CONTENT),
+    passed.
+
+%% Negative cases for POST .../publish: wrong credentials, a forged
+%% user_id property, a non-string payload, and malformed property values
+%% must all be rejected with 401 or 400 as appropriate.
+publish_fail_test(Config) ->
+    Msg = msg(<<"myqueue">>, [], <<"Hello world">>),
+    http_put(Config, "/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/users/myuser", [{password, <<"password">>},
+                                       {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+    %% management-tagged user without permissions: not authorised.
+    http_post(Config, "/exchanges/%2f/amq.default/publish", Msg, "myuser", "password",
+              ?NOT_AUTHORISED),
+    %% user_id that does not match the authenticated user: bad request.
+    Msg2 = [{exchange,         <<"">>},
+            {routing_key,      <<"myqueue">>},
+            {properties,       [{user_id, <<"foo">>}]},
+            {payload,          <<"Hello world">>},
+            {payload_encoding, <<"string">>}],
+    http_post(Config, "/exchanges/%2f/amq.default/publish", Msg2, ?BAD_REQUEST),
+    %% payload must be a JSON string, not an array.
+    Msg3 = [{exchange,         <<"">>},
+            {routing_key,      <<"myqueue">>},
+            {properties,       []},
+            {payload,          [<<"not a string">>]},
+            {payload_encoding, <<"string">>}],
+    http_post(Config, "/exchanges/%2f/amq.default/publish", Msg3, ?BAD_REQUEST),
+    %% Each ill-typed property value must individually be rejected.
+    MsgTemplate = [{exchange,         <<"">>},
+                   {routing_key,      <<"myqueue">>},
+                   {payload,          <<"Hello world">>},
+                   {payload_encoding, <<"string">>}],
+    [http_post(Config, "/exchanges/%2f/amq.default/publish",
+               [{properties, [BadProp]} | MsgTemplate], ?BAD_REQUEST)
+     || BadProp <- [{priority,   <<"really high">>},
+                    {timestamp,  <<"recently">>},
+                    {expiration, 1234}]],
+    http_delete(Config, "/users/myuser", ?NO_CONTENT),
+    passed.
+
+%% base64 payload encoding: valid base64 is decoded before enqueueing
+%% ("YWJjZA==" -> "abcd"); invalid base64 data or an unknown encoding
+%% name must be rejected with 400.
+publish_base64_test(Config) ->
+    Msg     = msg(<<"myqueue">>, [], <<"YWJjZA==">>, <<"base64">>),
+    BadMsg1 = msg(<<"myqueue">>, [], <<"flibble">>,  <<"base64">>),
+    BadMsg2 = msg(<<"myqueue">>, [], <<"YWJjZA==">>, <<"base99">>),
+    http_put(Config, "/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
+    http_post(Config, "/exchanges/%2f/amq.default/publish", Msg, ?OK),
+    http_post(Config, "/exchanges/%2f/amq.default/publish", BadMsg1, ?BAD_REQUEST),
+    http_post(Config, "/exchanges/%2f/amq.default/publish", BadMsg2, ?BAD_REQUEST),
+    [Msg2] = http_post(Config, "/queues/%2f/myqueue/get", [{requeue,  false},
+                                                           {count,    1},
+                                                           {encoding, auto}], ?OK),
+    ?assertEqual(<<"abcd">>, pget(payload, Msg2)),
+    http_delete(Config, "/queues/%2f/myqueue", ?NO_CONTENT),
+    passed.
+
+%% Publishing to the default exchange with a routing key that matches no
+%% queue must still return 200 but report {routed, false}.
+publish_unrouted_test(Config) ->
+    UnroutableMsg = msg(<<"hmmm">>, [], <<"Hello world">>),
+    Response = http_post(Config, "/exchanges/%2f/amq.default/publish",
+                         UnroutableMsg, ?OK),
+    ?assertEqual([{routed, false}], Response).
+
+%% Conditional deletion: ?if-empty must fail on a non-empty queue and
+%% ?if-unused must fail on an exchange with bindings / a queue with a
+%% consumer; both succeed once the condition actually holds.
+if_empty_unused_test(Config) ->
+    http_put(Config, "/exchanges/%2f/test", [], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/queues/%2f/test", [], [?CREATED, ?NO_CONTENT]),
+    http_post(Config, "/bindings/%2f/e/test/q/test", [], [?CREATED, ?NO_CONTENT]),
+    http_post(Config, "/exchanges/%2f/amq.default/publish",
+              msg(<<"test">>, [], <<"Hello world">>), ?OK),
+    %% Queue holds a message; exchange has a binding: both deletes refused.
+    http_delete(Config, "/queues/%2f/test?if-empty=true", ?BAD_REQUEST),
+    http_delete(Config, "/exchanges/%2f/test?if-unused=true", ?BAD_REQUEST),
+    http_delete(Config, "/queues/%2f/test/contents", ?NO_CONTENT),
+
+    %% Attach a consumer over AMQP so the queue counts as "used".
+    {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn(Config, "guest", "guest"),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue = <<"test">> }, self()),
+    http_delete(Config, "/queues/%2f/test?if-unused=true", ?BAD_REQUEST),
+    amqp_connection:close(Conn),
+
+    %% Conditions now hold: queue empty, exchange unused after queue goes.
+    http_delete(Config, "/queues/%2f/test?if-empty=true", ?NO_CONTENT),
+    http_delete(Config, "/exchanges/%2f/test?if-unused=true", ?NO_CONTENT),
+    passed.
+
+%% CRUD on runtime parameters against the "test" component registered by
+%% register_parameters_and_policy_validator/1: values the validator
+%% accepts are stored, rejected values give 400, unknown vhost gives 404.
+parameters_test(Config) ->
+    register_parameters_and_policy_validator(Config),
+
+    http_put(Config, "/parameters/test/%2f/good", [{value, <<"ignore">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/parameters/test/%2f/maybe", [{value, <<"good">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/parameters/test/%2f/maybe", [{value, <<"bad">>}], ?BAD_REQUEST),
+    http_put(Config, "/parameters/test/%2f/bad", [{value, <<"good">>}], ?BAD_REQUEST),
+    http_put(Config, "/parameters/test/um/good", [{value, <<"ignore">>}], ?NOT_FOUND),
+
+    Good = [{vhost,     <<"/">>},
+            {component, <<"test">>},
+            {name,      <<"good">>},
+            {value,     <<"ignore">>}],
+    Maybe = [{vhost,     <<"/">>},
+             {component, <<"test">>},
+             {name,      <<"maybe">>},
+             {value,     <<"good">>}],
+    List = [Good, Maybe],
+
+    %% The two stored parameters are visible at every listing depth.
+    assert_list(List, http_get(Config, "/parameters")),
+    assert_list(List, http_get(Config, "/parameters/test")),
+    assert_list(List, http_get(Config, "/parameters/test/%2f")),
+    assert_list([],   http_get(Config, "/parameters/oops")),
+    http_get(Config, "/parameters/test/oops", ?NOT_FOUND),
+
+    assert_item(Good,  http_get(Config, "/parameters/test/%2f/good", ?OK)),
+    assert_item(Maybe, http_get(Config, "/parameters/test/%2f/maybe", ?OK)),
+
+    http_delete(Config, "/parameters/test/%2f/good", ?NO_CONTENT),
+    http_delete(Config, "/parameters/test/%2f/maybe", ?NO_CONTENT),
+    http_delete(Config, "/parameters/test/%2f/bad", ?NOT_FOUND),
+
+    %% Everything deleted: all listings are empty again.
+    0 = length(http_get(Config, "/parameters")),
+    0 = length(http_get(Config, "/parameters/test")),
+    0 = length(http_get(Config, "/parameters/test/%2f")),
+    unregister_parameters_and_policy_validator(Config),
+    passed.
+
+%% Create two policies, check they are returned by item/listing GETs,
+%% then delete them and check the listings are empty again.
+policy_test(Config) ->
+    register_parameters_and_policy_validator(Config),
+    PolicyPos  = [{vhost,      <<"/">>},
+                  {name,       <<"policy_pos">>},
+                  {pattern,    <<".*">>},
+                  {definition, [{testpos,[1,2,3]}]},
+                  {priority,   10}],
+    PolicyEven = [{vhost,      <<"/">>},
+                  {name,       <<"policy_even">>},
+                  {pattern,    <<".*">>},
+                  {definition, [{testeven,[1,2,3,4]}]},
+                  {priority,   10}],
+    http_put(Config,
+             "/policies/%2f/policy_pos",
+             lists:keydelete(key, 1, PolicyPos),
+             [?CREATED, ?NO_CONTENT]),
+    http_put(Config,
+             "/policies/%2f/policy_even",
+             lists:keydelete(key, 1, PolicyEven),
+             [?CREATED, ?NO_CONTENT]),
+    assert_item(PolicyPos,  http_get(Config, "/policies/%2f/policy_pos",  ?OK)),
+    assert_item(PolicyEven, http_get(Config, "/policies/%2f/policy_even", ?OK)),
+    List = [PolicyPos, PolicyEven],
+    assert_list(List, http_get(Config, "/policies",     ?OK)),
+    assert_list(List, http_get(Config, "/policies/%2f", ?OK)),
+
+    http_delete(Config, "/policies/%2f/policy_pos", ?NO_CONTENT),
+    http_delete(Config, "/policies/%2f/policy_even", ?NO_CONTENT),
+    0 = length(http_get(Config, "/policies")),
+    0 = length(http_get(Config, "/policies/%2f")),
+    unregister_parameters_and_policy_validator(Config),
+    passed.
+
+%% Policy/parameter access by user tag: administrator and policymaker can
+%% write policies+parameters in a vhost they have permissions on (Pos);
+%% monitoring and management can only read policies (Neg); nobody may
+%% touch a vhost they have no permissions on (AlwaysNeg, vhost "/").
+policy_permissions_test(Config) ->
+    register_parameters_and_policy_validator(Config),
+
+    %% One user per tag under test.
+    http_put(Config, "/users/admin",  [{password, <<"admin">>},
+                                       {tags, <<"administrator">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/users/mon",    [{password, <<"mon">>},
+                                       {tags, <<"monitoring">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/users/policy", [{password, <<"policy">>},
+                                       {tags, <<"policymaker">>}], [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/users/mgmt",   [{password, <<"mgmt">>},
+                                       {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
+    %% All four get full permissions on vhost "v" (but not on "/").
+    Perms = [{configure, <<".*">>},
+             {write,     <<".*">>},
+             {read,      <<".*">>}],
+    http_put(Config, "/vhosts/v", none, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/v/admin",  Perms, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/v/mon",    Perms, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/v/policy", Perms, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/permissions/v/mgmt",   Perms, [?CREATED, ?NO_CONTENT]),
+
+    Policy = [{pattern,    <<".*">>},
+              {definition, [{<<"ha-mode">>, <<"all">>}]}],
+    Param = [{value, <<"">>}],
+
+    %% Seed one policy and one parameter in "/" so AlwaysNeg has targets.
+    http_put(Config, "/policies/%2f/HA", Policy, [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/parameters/test/%2f/good", Param, [?CREATED, ?NO_CONTENT]),
+
+    Pos = fun (U) ->
+                  Expected = case U of "admin" -> [?CREATED, ?NO_CONTENT]; _ -> ?NO_CONTENT end,
+                  http_put(Config, "/policies/v/HA",        Policy, U, U, Expected),
+                  http_put(Config,
+                           "/parameters/test/v/good",       Param, U, U, ?NO_CONTENT),
+                  1 = length(http_get(Config, "/policies",          U, U, ?OK)),
+                  1 = length(http_get(Config, "/parameters/test",   U, U, ?OK)),
+                  1 = length(http_get(Config, "/parameters",        U, U, ?OK)),
+                  1 = length(http_get(Config, "/policies/v",        U, U, ?OK)),
+                  1 = length(http_get(Config, "/parameters/test/v", U, U, ?OK)),
+                  http_get(Config, "/policies/v/HA",                U, U, ?OK),
+                  http_get(Config, "/parameters/test/v/good",       U, U, ?OK)
+          end,
+    Neg = fun (U) ->
+                  http_put(Config, "/policies/v/HA",    Policy, U, U, ?NOT_AUTHORISED),
+                  http_put(Config,
+                           "/parameters/test/v/good",   Param, U, U, ?NOT_AUTHORISED),
+                  http_put(Config,
+                           "/parameters/test/v/admin",  Param, U, U, ?NOT_AUTHORISED),
+                  %% Policies are read-only for management and monitoring.
+                  http_get(Config, "/policies",                 U, U, ?OK),
+                  http_get(Config, "/policies/v",               U, U, ?OK),
+                  http_get(Config, "/parameters",               U, U, ?NOT_AUTHORISED),
+                  http_get(Config, "/parameters/test",          U, U, ?NOT_AUTHORISED),
+                  http_get(Config, "/parameters/test/v",        U, U, ?NOT_AUTHORISED),
+                  http_get(Config, "/policies/v/HA",            U, U, ?NOT_AUTHORISED),
+                  http_get(Config, "/parameters/test/v/good",   U, U, ?NOT_AUTHORISED)
+          end,
+    AlwaysNeg =
+        fun (U) ->
+                http_put(Config, "/policies/%2f/HA",  Policy, U, U, ?NOT_AUTHORISED),
+                http_put(Config,
+                         "/parameters/test/%2f/good", Param, U, U, ?NOT_AUTHORISED),
+                http_get(Config, "/policies/%2f/HA",          U, U, ?NOT_AUTHORISED),
+                http_get(Config, "/parameters/test/%2f/good", U, U, ?NOT_AUTHORISED)
+        end,
+
+    %% Neg must run before Pos: Pos creates the objects Pos then counts.
+    [Neg(U) || U <- ["mon", "mgmt"]],
+    [Pos(U) || U <- ["admin", "policy"]],
+    [AlwaysNeg(U) || U <- ["mon", "mgmt", "admin", "policy"]],
+
+    %% This one is deliberately different between admin and policymaker.
+    http_put(Config, "/parameters/test/v/admin", Param, "admin", "admin", [?CREATED, ?NO_CONTENT]),
+    http_put(Config, "/parameters/test/v/admin", Param, "policy", "policy",
+             ?BAD_REQUEST),
+
+    http_delete(Config, "/vhosts/v", ?NO_CONTENT),
+    http_delete(Config, "/users/admin", ?NO_CONTENT),
+    http_delete(Config, "/users/mon", ?NO_CONTENT),
+    http_delete(Config, "/users/policy", ?NO_CONTENT),
+    http_delete(Config, "/users/mgmt", ?NO_CONTENT),
+    http_delete(Config, "/policies/%2f/HA", ?NO_CONTENT),
+
+    unregister_parameters_and_policy_validator(Config),
+    passed.
+
+%% Regression test for issue 67: a 401 (unauthorised) response must still
+%% carry a JSON content type so API clients can parse the error body.
+issue67_test(Config)->
+    AuthHdr = auth_header("user_no_access", "password_no_access"),
+    {ok, {{_, 401, _}, RespHeaders, _}} = req(Config, get, "/queues", [AuthHdr]),
+    ContentType = proplists:get_value("content-type", RespHeaders),
+    ?assertEqual("application/json", ContentType),
+    passed.
+
+%% The stock management plugin ships exactly one UI extension: its
+%% JavaScript dispatcher.
+extensions_test(Config) ->
+    Extensions = http_get(Config, "/extensions", ?OK),
+    [[{javascript,<<"dispatcher.js">>}]] = Extensions,
+    passed.
+
+%% CORS behaviour of the HTTP API: no CORS headers when disabled, the
+%% right subset of headers on GET vs. preflight OPTIONS when enabled,
+%% allow-headers echoing, and max-age disappearing when caching is off.
+cors_test(Config) ->
+    %% With CORS disabled. No header should be received.
+    {ok, {_, HdNoCORS, _}} = req(Config, get, "/overview", [auth_header("guest", "guest")]),
+    false = lists:keymember("access-control-allow-origin", 1, HdNoCORS),
+    %% The Vary header should include "Origin" regardless of CORS configuration.
+    {_, "Accept-Encoding, origin"} = lists:keyfind("vary", 1, HdNoCORS),
+    %% Enable CORS.
+    rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, cors_allow_origins, ["http://rabbitmq.com"]]),
+    %% We should only receive allow-origin and allow-credentials from GET.
+    {ok, {_, HdGetCORS, _}} = req(Config, get, "/overview",
+                                  [{"origin", "http://rabbitmq.com"}, auth_header("guest", "guest")]),
+    true = lists:keymember("access-control-allow-origin", 1, HdGetCORS),
+    true = lists:keymember("access-control-allow-credentials", 1, HdGetCORS),
+    false = lists:keymember("access-control-expose-headers", 1, HdGetCORS),
+    false = lists:keymember("access-control-max-age", 1, HdGetCORS),
+    false = lists:keymember("access-control-allow-methods", 1, HdGetCORS),
+    false = lists:keymember("access-control-allow-headers", 1, HdGetCORS),
+    %% We should receive allow-origin, allow-credentials and allow-methods from OPTIONS.
+    {ok, {_, HdOptionsCORS, _}} = req(Config, options, "/overview",
+                                      [{"origin", "http://rabbitmq.com"}, auth_header("guest", "guest")]),
+    true = lists:keymember("access-control-allow-origin", 1, HdOptionsCORS),
+    true = lists:keymember("access-control-allow-credentials", 1, HdOptionsCORS),
+    false = lists:keymember("access-control-expose-headers", 1, HdOptionsCORS),
+    true = lists:keymember("access-control-max-age", 1, HdOptionsCORS),
+    true = lists:keymember("access-control-allow-methods", 1, HdOptionsCORS),
+    false = lists:keymember("access-control-allow-headers", 1, HdOptionsCORS),
+    %% We should receive allow-headers when request-headers is sent.
+    {ok, {_, HdAllowHeadersCORS, _}} = req(Config, options, "/overview",
+                                           [{"origin", "http://rabbitmq.com"},
+                                            auth_header("guest", "guest"),
+                                            {"access-control-request-headers", "x-piggy-bank"}]),
+    {_, "x-piggy-bank"} = lists:keyfind("access-control-allow-headers", 1, HdAllowHeadersCORS),
+    %% Disable preflight request caching.
+    rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, cors_max_age, undefined]),
+    %% We shouldn't receive max-age anymore.
+    {ok, {_, HdNoMaxAgeCORS, _}} = req(Config, options, "/overview",
+                                       [{"origin", "http://rabbitmq.com"}, auth_header("guest", "guest")]),
+    false = lists:keymember("access-control-max-age", 1, HdNoMaxAgeCORS),
+    %% Disable CORS again.
+    rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, cors_allow_origins, []]),
+    passed.
+
+%% -------------------------------------------------------------------
+%% Helpers.
+%% -------------------------------------------------------------------
+
+%% Build a publish-endpoint message body with the default "string"
+%% payload encoding.
+msg(Key, Headers, Body) ->
+    msg(Key, Headers, Body, <<"string">>).
+
+%% Build the proplist POSTed to /exchanges/.../publish: a persistent
+%% (delivery_mode 2) message routed via the default exchange to
+%% RoutingKey, carrying the given AMQP headers, payload and encoding.
+msg(RoutingKey, AmqpHeaders, Payload, Encoding) ->
+    Properties = [{delivery_mode, 2},
+                  {headers,       AmqpHeaders}],
+    [{exchange,         <<"">>},
+     {routing_key,      RoutingKey},
+     {properties,       Properties},
+     {payload,          Payload},
+     {payload_encoding, Encoding}].
+
+%% Fetch the local TCP port of an AMQP connection by asking the client
+%% for its raw socket and then querying inet.
+local_port(Conn) ->
+    [{sock, Socket}] = amqp_connection:info(Conn, [sock]),
+    {ok, LocalPort} = inet:port(Socket),
+    LocalPort.
+
+%% Spawn N processes that each, after a random delay of up to 250ms,
+%% open a raw TCP connection to the broker's AMQP port and send garbage
+%% (non-AMQP) data, then report back (via receive_msg/1) whether the
+%% server still answered with an AMQP protocol header.
+spawn_invalid(_Config, 0) ->
+    ok;
+spawn_invalid(Config, N) ->
+    Self = self(),
+    spawn(fun() ->
+                  timer:sleep(rand_compat:uniform(250)),
+                  {ok, Sock} = gen_tcp:connect("localhost", amqp_port(Config), [list]),
+                  ok = gen_tcp:send(Sock, "Some Data"),
+                  receive_msg(Self)
+          end),
+    spawn_invalid(Config, N-1).
+
+%% Wait (up to a minute) for an AMQP protocol header to arrive on this
+%% process's raw socket, then report the outcome to the parent process.
+receive_msg(Parent) ->
+    receive
+        {tcp, _Sock, "AMQP" ++ _} ->
+            Parent ! done
+    after 60000 ->
+            Parent ! no_reply
+    end.
+
+%% Collect N `done' reports from the processes spawned by
+%% spawn_invalid/2; the first `no_reply' aborts the whole wait.
+wait_for_answers(0) ->
+    ok;
+wait_for_answers(Remaining) ->
+    receive
+        done     -> wait_for_answers(Remaining - 1);
+        no_reply -> throw(no_reply)
+    end.
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl b/rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_rabbitmqadmin_SUITE.erl
new file mode 100644 (file)
index 0000000..80d5527
--- /dev/null
@@ -0,0 +1,435 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_rabbitmqadmin_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+%% One common_test group per python interpreter found on this machine.
+all() ->
+    [{group, list_to_atom(Python)} || Python <- find_pythons()].
+
+%% The same list of rabbitmqadmin test cases is run once per available
+%% python interpreter (group names mirror all/0).
+groups() ->
+    TestCases = [help,
+                 host,
+                 config,
+                 user,
+                 fmt_long,
+                 fmt_kvp,
+                 fmt_tsv,
+                 fmt_table,
+                 fmt_bash,
+                 vhosts,
+                 users,
+                 permissions,
+                 alt_vhost,
+                 exchanges,
+                 queues,
+                 bindings,
+                 policies,
+                 parameters,
+                 publish,
+                 ignore_vhost,
+                 sort],
+    [{list_to_atom(Python), [], TestCases} || Python <- find_pythons()].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Start a broker + client for the suite and stash the path to the
+%% rabbitmqadmin script in the CT config for run/2 to use.
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    inets:start(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+                                                    {rmq_nodename_suffix, ?MODULE}
+                                                   ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+                                      rabbit_ct_broker_helpers:setup_steps() ++
+                                      rabbit_ct_client_helpers:setup_steps() ++
+                                      [fun (C) ->
+                                               rabbit_ct_helpers:set_config(C,
+                                                                            {rabbitmqadmin_path,
+                                                                             rabbitmqadmin(C)})
+                                       end
+                                      ]).
+
+%% Tear down in reverse order of setup: client helpers, then broker.
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+                                         rabbit_ct_client_helpers:teardown_steps() ++
+                                             rabbit_ct_broker_helpers:teardown_steps()).
+
+%% Record which python interpreter this group runs under; the group name
+%% is the interpreter name, so it can be reused directly.
+init_per_group(Group, Config) when Group =:= python2; Group =:= python3 ->
+    rabbit_ct_helpers:set_config(Config, {python, atom_to_list(Group)});
+init_per_group(_, Config) ->
+    Config.
+
+%% Nothing group-specific to tear down; hand the config back unchanged.
+end_per_group(_Group, Config) ->
+    Config.
+
+%% The `config' case repoints $HOME (rabbitmqadmin looks for its config
+%% file there), so remember the original value for end_per_testcase/2.
+%% NOTE(review): os:getenv/1 returns `false' when HOME is unset, so the
+%% stored value may not be a string — confirm end_per_testcase handles it.
+init_per_testcase(config, Config) ->
+    rabbit_ct_helpers:set_config(Config, {env_home, os:getenv("HOME")});
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+%% Restore the HOME variable stashed by init_per_testcase(config, _).
+%% os:getenv("HOME") returns `false' when the variable was never set;
+%% passing that to os:putenv/2 would badarg, so unset it instead.
+end_per_testcase(config, Config) ->
+    case rabbit_ct_helpers:get_config(Config, env_home) of
+        false -> os:unsetenv("HOME");
+        Home  -> os:putenv("HOME", Home)
+    end;
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Both --help and the `help' subcommand succeed for known topics;
+%% asking for help on an unknown topic must fail.
+help(Config) ->
+    [{ok, _} = run(Config, HelpArgs)
+     || HelpArgs <- [["--help"],
+                     ["help", "subcommands"],
+                     ["help", "config"]]],
+    {error, _, _} = run(Config, ["help", "astronomy"]).
+
+%% The default host and an explicit --host localhost both work; an
+%% unresolvable host name must produce an error.
+host(Config) ->
+    {ok, _} = run(Config, ["show", "overview"]),
+    {ok, _} = run(Config, ["--host", "localhost", "show", "overview"]),
+    BadHost = "some-host-that-does-not-exist",
+    {error, _, _} = run(Config, ["--host", BadHost, "show", "overview"]).
+
+%% Exercise --config / --node handling. HOME is pointed at priv_dir so
+%% the default config written there is picked up; the original HOME is
+%% restored by end_per_testcase(config, _).
+config(Config) ->
+    PrivDir = ?config(priv_dir, Config),
+    os:putenv("HOME", PrivDir),
+    {_DefConf, TestConf} = write_test_config(Config),
+    %% A nonexistent config file path must be an error.
+    {error, _, _} = run(Config, ["--config", "/tmp/no-such-config-file",
+                                 "show", "overview"]),
+    {ok, _} = run(Config, ["--config", TestConf, "--node",
+                           "host_normal", "show", "overview"]),
+
+    % test 'default node in the config file' where "default" uses an invalid host
+    {error, _, _} = run(Config, ["--config", TestConf, "show", "overview"]),
+    {ok, _} = run(Config, ["show", "overview"]),
+    %% A --node name that does not exist in the config file must fail.
+    {error, _, _} = run(Config, ["--node", "non_default", "show", "overview"]).
+
+%% Valid credentials succeed; a wrong user name or a wrong password is
+%% each rejected on its own.
+user(Config) ->
+    TryLogin = fun (User, Password) ->
+                       run(Config, ["--user", User, "--password", Password,
+                                    "show", "overview"])
+               end,
+    {ok, _} = TryLogin("guest", "guest"),
+    {error, _, _} = TryLogin("no", "guest"),
+    {error, _, _} = TryLogin("guest", "no").
+
+%% "long" output format: one "key: value" line per column, records
+%% separated by 80-dash rule lines.
+fmt_long(Config) ->
+    Out = multi_line_string([
+        "",
+        "--------------------------------------------------------------------------------",
+        "",
+        "   name: /",
+        "tracing: False",
+        "",
+        "--------------------------------------------------------------------------------",
+        "" ]),
+    {ok, Out} = run(Config, ["--format", "long", "list", "vhosts", "name", "tracing"]).
+
+%% "kvp" output format: one space-separated key="value" line per row.
+fmt_kvp(Config) ->
+    Expected = multi_line_string(["name=\"/\" tracing=\"False\""]),
+    {ok, Expected} = run(Config, ["--format", "kvp",
+                                  "list", "vhosts", "name", "tracing"]).
+
+%% "tsv" output format: a tab-separated header row, then data rows.
+fmt_tsv(Config) ->
+    Expected = multi_line_string(["name\ttracing",
+                                  "/\tFalse"]),
+    {ok, Expected} = run(Config, ["--format", "tsv",
+                                  "list", "vhosts", "name", "tracing"]).
+
+%% "table" is the default output format, so the ASCII table must be
+%% produced both with no --format flag and with an explicit one.
+fmt_table(Config) ->
+    Out = multi_line_string([
+                             "+------+---------+",
+                             "| name | tracing |",
+                             "+------+---------+",
+                             "| /    | False   |",
+                             "+------+---------+"
+                            ]),
+    {ok, Out} = run(Config, ["list", "vhosts", "name", "tracing"]),
+    {ok, Out} = run(Config, ["--format", "table", "list",
+                             "vhosts", "name", "tracing"]).
+
+%% "bash" output format: bare values only (for shell completion).
+fmt_bash(Config) ->
+    Expected = "/\n",
+    {ok, Expected} = run(Config, ["--format", "bash", "list",
+                                  "vhosts", "name", "tracing"]).
+
+%% Declaring and deleting a vhost is reflected in `list vhosts'.
+vhosts(Config) ->
+    ListVhosts = l("vhosts"),
+    {ok, ["/"]} = run_list(Config, ListVhosts),
+    {ok, _} = run(Config, ["declare", "vhost", "name=foo"]),
+    {ok, ["/", "foo"]} = run_list(Config, ListVhosts),
+    {ok, _} = run(Config, ["delete", "vhost", "name=foo"]),
+    {ok, ["/"]} = run_list(Config, ListVhosts).
+
+%% Declaring a user requires a password (or hash); with one supplied the
+%% user appears in `list users' and can be deleted again.
+users(Config) ->
+    {ok, ["guest"]} = run_list(Config, l("users")),
+    {error, _, _} = run(Config, ["declare", "user", "name=foo"]),
+    {ok, _} = run(Config, ["declare", "user", "name=foo", "password=pass", "tags="]),
+    {ok, ["foo", "guest"]} = run_list(Config, l("users")),
+    {ok, _} = run(Config, ["delete", "user", "name=foo"]),
+    {ok, ["guest"]} = run_list(Config, l("users")).
+
+%% Granting a user permissions on a vhost adds a row to the permissions
+%% listing; deleting the user/vhost cleans up.
+permissions(Config) ->
+    {ok, _} = run(Config, ["declare", "vhost", "name=foo"]),
+    {ok, _} = run(Config, ["declare", "user", "name=bar", "password=pass", "tags="]),
+    {ok, [["guest", "/"]]} = run_table(Config, ["list", "permissions",
+                                                "user", "vhost"]),
+    {ok, _} = run(Config, ["declare", "permission", "user=bar", "vhost=foo",
+                           "configure=.*", "write=.*", "read=.*"]),
+    {ok, [["guest", "/"], ["bar", "foo"]]} =  run_table(Config, ["list",
+                                                                 "permissions",
+                                                                 "user",
+                                                                 "vhost"]),
+    {ok, _} = run(Config, ["delete", "user", "name=bar"]),
+    {ok, _} = run(Config, ["delete", "vhost", "name=foo"]).
+
+%% The --vhost flag scopes declare/delete: queues created with and
+%% without it land in different vhosts and both show up in `list queues'.
+alt_vhost(Config) ->
+    {ok, _} = run(Config, ["declare", "vhost", "name=foo"]),
+    {ok, _} = run(Config, ["declare", "permission", "user=guest", "vhost=foo",
+                           "configure=.*", "write=.*", "read=.*"]),
+    {ok, _} = run(Config, ["declare", "queue", "name=in_/"]),
+    {ok, _} = run(Config, ["--vhost", "foo", "declare", "queue", "name=in_foo"]),
+    {ok, [["/", "in_/"], ["foo", "in_foo"]]} = run_table(Config, ["list", "queues",
+                                                                  "vhost", "name"]),
+    {ok, _} = run(Config, ["--vhost", "foo", "delete", "queue", "name=in_foo"]),
+    {ok, _} = run(Config, ["delete", "queue", "name=in_/"]),
+    {ok, _} = run(Config, ["delete", "vhost", "name=foo"]).
+
+%% A declared exchange appears alongside the built-in amq.* exchanges in
+%% `list exchanges' (sorted by name) and can be deleted again.
+exchanges(Config) ->
+    {ok, _} = run(Config, ["declare", "exchange", "name=foo", "type=direct"]),
+    {ok, ["amq.direct",
+          "amq.fanout",
+          "amq.headers",
+          "amq.match",
+          "amq.rabbitmq.log",
+          "amq.rabbitmq.trace",
+          "amq.topic",
+          "foo"]} = run_list(Config, l("exchanges")),
+    {ok, _} = run(Config, ["delete", "exchange", "name=foo"]).
+
+%% A declared queue shows up in `list queues' and can be deleted again.
+queues(Config) ->
+    QueueName = "foo",
+    {ok, _} = run(Config, ["declare", "queue", "name=" ++ QueueName]),
+    {ok, [QueueName]} = run_list(Config, l("queues")),
+    {ok, _} = run(Config, ["delete", "queue", "name=" ++ QueueName]).
+
+%% Declaring a binding adds it next to the queue's implicit default-
+%% exchange binding (whose source column is empty in tsv output).
+bindings(Config) ->
+    {ok, _} = run(Config, ["declare", "queue", "name=foo"]),
+    {ok, _} = run(Config, ["declare", "binding", "source=amq.direct",
+                           "destination=foo", "destination_type=queue",
+                           "routing_key=test"]),
+    {ok, [["foo", "queue", "foo"],
+          ["amq.direct", "foo", "queue", "test"]
+         ]} = run_table(Config,
+                              ["list", "bindings",
+                               "source", "destination",
+                               "destination_type", "routing_key"]),
+    {ok, _} = run(Config, ["delete", "queue", "name=foo"]).
+
+%% Declare a policy with a JSON definition and check it comes back in
+%% the listing (note the re-serialised JSON has a space after the colon).
+policies(Config) ->
+    {ok, _} = run(Config, ["declare", "policy", "name=ha",
+                           "pattern=.*", "definition={\"ha-mode\":\"all\"}"]),
+    {ok, [["ha", "/", ".*", "{\"ha-mode\": \"all\"}"]]} =
+        run_table(Config, ["list", "policies", "name",
+                                 "vhost", "pattern", "definition"]),
+    {ok, _} = run(Config, ["delete", "policy", "name=ha"]).
+
+%% Declare/list/delete a runtime parameter against the "test" component
+%% validator registered on the broker node for the duration of the test.
+parameters(Config) ->
+    ok = rpc(Config, rabbit_mgmt_runtime_parameters_util, register, []),
+    {ok, _} = run(Config, ["declare", "parameter", "component=test",
+                           "name=good", "value=123"]),
+    {ok, [["test", "good", "/", "123"]]} = run_table(Config, ["list",
+                                                              "parameters",
+                                                              "component",
+                                                              "name",
+                                                              "vhost",
+                                                              "value"]),
+    {ok, _} = run(Config, ["delete", "parameter", "component=test", "name=good"]),
+    ok = rpc(Config, rabbit_mgmt_runtime_parameters_util, unregister, []).
+
+%% Publish via the CLI (and once via stdin through python) and then get
+%% the messages back, checking message_count/redelivered bookkeeping and
+%% the payload_file option. The exp_msg rows encode the expected order.
+publish(Config) ->
+    {ok, _} = run(Config, ["declare", "queue", "name=test"]),
+    {ok, _} = run(Config, ["publish", "routing_key=test", "payload=test_1"]),
+    {ok, _} = run(Config, ["publish", "routing_key=test", "payload=test_2"]),
+    % publish with stdin
+    {ok, _} = rabbit_ct_helpers:exec(["python", "-c",
+                                      publish_with_stdin_python_program(Config, "test_3")],
+                                     []),
+
+    M = exp_msg("test", 2, "False", "test_1"),
+    {ok, [M]} = run_table(Config, ["get", "queue=test", "requeue=false"]),
+    %% test_2 is requeued here, so the next get sees it redelivered.
+    M2 = exp_msg("test", 1, "False", "test_2"),
+    {ok, [M2]} = run_table(Config, ["get", "queue=test", "requeue=true"]),
+    M3 = exp_msg("test", 1, "True", "test_2"),
+    {ok, [M3]} = run_table(Config, ["get",
+                                    "queue=test",
+                                    "requeue=false"]),
+    M4 = exp_msg("test", 0, "False", "test_3"),
+    {ok, [M4]} = run_table(Config, ["get",
+                                    "queue=test",
+                                    "requeue=false"]),
+    %% payload_file writes the body to disk instead of printing it.
+    {ok, _} = run(Config, ["publish", "routing_key=test", "payload=test_4"]),
+    Fn = filename:join(?config(priv_dir, Config), "publish_test_4"),
+
+    {ok, _} = run(Config, ["get", "queue=test", "requeue=false", "payload_file=" ++ Fn]),
+    {ok, <<"test_4">>} = file:read_file(Fn),
+    {ok, _} = run(Config, ["delete", "queue", "name=test"]).
+
+%% Commands that are not vhost-scoped must accept (and ignore) --vhost.
+ignore_vhost(Config) ->
+    [{ok, _} = run(Config, ["--vhost", "/" | Cmd])
+     || Cmd <- [["show", "overview"],
+                ["list", "users"],
+                ["list", "vhosts"],
+                ["list", "nodes"],
+                ["list", "permissions"],
+                ["declare", "user", "name=foo", "password=pass", "tags="]]],
+    {ok, _} = run(Config, ["delete", "user", "name=foo"]).
+
+%% --sort orders rows by the named column (rows lacking the column sort
+%% first/last accordingly) and --sort-reverse flips the order.
+sort(Config) ->
+    {ok, _} = run(Config, ["declare", "queue", "name=foo"]),
+    {ok, _} = run(Config, ["declare", "binding", "source=amq.direct",
+                           "destination=foo", "destination_type=queue",
+                           "routing_key=bbb"]),
+    {ok, _} = run(Config, ["declare", "binding", "source=amq.topic",
+                           "destination=foo", "destination_type=queue",
+                           "routing_key=aaa"]),
+    {ok, [["foo"],
+          ["amq.direct", "bbb"],
+          ["amq.topic", "aaa"]]} = run_table(Config, ["--sort", "source",
+                                                      "list", "bindings",
+                                                      "source", "routing_key"]),
+    {ok, [["amq.topic", "aaa"],
+          ["amq.direct", "bbb"],
+          ["foo"]]} = run_table(Config, ["--sort", "routing_key",
+                                         "list", "bindings", "source",
+                                         "routing_key"]),
+    {ok, [["amq.topic", "aaa"],
+          ["amq.direct", "bbb"],
+          ["foo"]]} = run_table(Config, ["--sort", "source",
+                                         "--sort-reverse", "list",
+                                         "bindings", "source",
+                                         "routing_key"]),
+    {ok, _} = run(Config, ["delete", "queue", "name=foo"]).
+
+%% -------------------------------------------------------------------
+%% Utilities
+%% -------------------------------------------------------------------
+
+exp_msg(Key, Count, Redelivered, Payload) ->
+    % routing_key, message_count,
+    % payload, payload_bytes,
+    % payload_encoding, redelivered
+    [Key, integer_to_list(Count),
+     Payload, integer_to_list(length(Payload)),
+     "string", Redelivered].
+
+rpc(Config, M, F, A) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0, M, F, A).
+
+l(Thing) ->
+    ["list", Thing, "name"].
+
+multi_line_string(Lines) ->
+    lists:flatten([string:join(Lines, io_lib:nl()), io_lib:nl()]).
+
+run_table(Config, Args) ->
+    {ok, Lines} = run_list(Config, Args),
+    Tokens = [string:tokens(L, "\t") || L <- Lines],
+    {ok, Tokens}.
+
+run_list(Config, Args) ->
+    A = ["-f", "tsv", "-q"],
+    case run(Config, A ++ Args) of
+        {ok, Out} -> {ok, string:tokens(Out, io_lib:nl())};
+        Err -> Err
+    end.
+
+run(Config, Args) ->
+    Py = rabbit_ct_helpers:get_config(Config, python),
+    MgmtPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mgmt),
+    RmqAdmin = rabbit_ct_helpers:get_config(Config, rabbitmqadmin_path),
+    rabbit_ct_helpers:exec([Py,
+                                  RmqAdmin,
+                                  "-P",
+                                  integer_to_list(MgmtPort)] ++ Args,
+                                 [drop_stdout]).
+
+rabbitmqadmin(Config) ->
+    filename:join([?config(current_srcdir, Config), "bin", "rabbitmqadmin"]).
+
+find_pythons() ->
+    Py2 = rabbit_ct_helpers:exec(["python2", "-V"]),
+    Py3 = rabbit_ct_helpers:exec(["python3", "-V"]),
+    case {Py2, Py3} of
+         {{ok, _}, {ok, _}} -> ["python2", "python3"];
+         {{ok, _}, _} -> ["python2"];
+         {_, {ok, _}} -> ["python3"];
+         _ -> erlang:error("python not found")
+    end.
+
+publish_with_stdin_python_program(Config, In) ->
+    % This is a nasty workaround for Erlang ports not supporting EOF
+    Py = rabbit_ct_helpers:get_config(Config, python),
+    MgmtPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mgmt),
+    RmqAdmin = rabbit_ct_helpers:get_config(Config, rabbitmqadmin_path),
+    "import subprocess;" ++
+    "proc = subprocess.Popen(['" ++ Py ++ "', '" ++ RmqAdmin ++ "', '-P', '" ++ integer_to_list(MgmtPort) ++
+    "', 'publish', 'routing_key=test'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE);" ++
+    "(stdout, stderr) = proc.communicate('" ++ In ++ "');" ++
+    "exit(proc.returncode)".
+
+write_test_config(Config) ->
+    MgmtPort = integer_to_list(rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mgmt)),
+    PrivDir = ?config(priv_dir, Config),
+    DefaultConfig = [
+        "[non_default]",
+        "hostname = localhost",
+        "port = 99999",
+        "username = guest",
+        "password = guest",
+        "declare_vhost = /",
+        "vhost = /",
+        "",
+        "[bad_host]",
+        "hostname = rabbit.acme.com",
+        "port = " ++ MgmtPort,
+        "username = guest",
+        "password = guest"
+                    ],
+    TestConfig = [
+        "[host_normal]",
+        "hostname = localhost",
+        "port = " ++ MgmtPort,
+        "username = guest",
+        "password = guest",
+        "declare_vhost = /",
+        "vhost = /",
+        "",
+        "[default]",
+        "hostname = localhost",
+        "port = 99999",
+        "username = guest",
+        "password = guest"
+           ],
+    DefaultConfig1 = [string:join(DefaultConfig, io_lib:nl()), io_lib:nl()],
+    TestConfig1 = [string:join(TestConfig, io_lib:nl()), io_lib:nl()],
+    FnDefault = filename:join(PrivDir, ".rabbitmqadmin.conf"),
+    FnTest = filename:join(PrivDir, "test-config"),
+    file:write_file(FnDefault, DefaultConfig1),
+    file:write_file(FnTest, TestConfig1),
+    {FnDefault, FnTest}.
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl b/rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_runtime_parameters_util.erl
new file mode 100644 (file)
index 0000000..9245827
--- /dev/null
@@ -0,0 +1,72 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_runtime_parameters_util).
+-behaviour(rabbit_runtime_parameter).
+-behaviour(rabbit_policy_validator).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([validate/5, notify/4, notify_clear/3]).
+-export([register/0, unregister/0]).
+-export([validate_policy/1]).
+-export([register_policy_validator/0, unregister_policy_validator/0]).
+
+%%----------------------------------------------------------------------------
+
+register() ->
+    rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE).
+
+unregister() ->
+    rabbit_registry:unregister(runtime_parameter, <<"test">>).
+
+validate(_, <<"test">>, <<"good">>,  _Term, _User)      -> ok;
+validate(_, <<"test">>, <<"maybe">>, <<"good">>, _User) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, none)       -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, User) ->
+    case lists:member(administrator, User#user.tags) of
+        true  -> ok;
+        false -> {error, "meh", []}
+    end;
+validate(_, <<"test">>, _, _, _)                        -> {error, "meh", []}.
+
+notify(_, _, _, _) -> ok.
+notify_clear(_, _, _) -> ok.
+
+%%----------------------------------------------------------------------------
+
+register_policy_validator() ->
+    rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE),
+    rabbit_registry:register(policy_validator, <<"testpos">>,  ?MODULE).
+
+unregister_policy_validator() ->
+    rabbit_registry:unregister(policy_validator, <<"testeven">>),
+    rabbit_registry:unregister(policy_validator, <<"testpos">>).
+
+validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) ->
+    case  length(Terms) rem 2 =:= 0 of
+        true  -> ok;
+        false -> {error, "meh", []}
+    end;
+
+validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) ->
+    case lists:all(fun (N) -> is_integer(N) andalso N > 0 end, Terms) of
+        true  -> ok;
+        false -> {error, "meh", []}
+    end;
+
+validate_policy(_) ->
+    {error, "meh", []}.
similarity index 65%
rename from rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_db.erl
rename to rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_test_db_SUITE.erl
index 7c7dd0d9c9adad5b078e4968dcc2120a6ecf3c7a..1f198ccb6e80ac4ae471c6c0993c65ec1ec1dcc7 100644 (file)
@@ -1,41 +1,92 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
 %%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
 %%
-%%   The Original Code is RabbitMQ Management Console.
+%% The Original Code is RabbitMQ.
 %%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_mgmt_test_db).
+-module(rabbit_mgmt_test_db_SUITE).
 
--include("rabbit_mgmt.hrl").
--include_lib("eunit/include/eunit.hrl").
--include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include("include/rabbit_mgmt.hrl").
+-include("include/rabbit_mgmt_test.hrl").
 
--import(rabbit_misc, [pget/2]).
--import(rabbit_mgmt_test_util, [assert_list/2, assert_item/2, test_item/2]).
-
--define(debugVal2(E),
-       ((fun (__V) ->
-                 ?debugFmt(<<"~s = ~p">>, [(??E), __V]),
-                 __V
-         end)(E))).
+-import(rabbit_mgmt_test_util, [assert_list/2, assert_item/2,
+                                reset_management_settings/1]).
 
-%%----------------------------------------------------------------------------
-%% Tests
-%%----------------------------------------------------------------------------
+-import(rabbit_misc, [pget/2]).
 
-queue_coarse_test() ->
-    rabbit_mgmt_db:override_lookups([{exchange, fun dummy_lookup/1},
-                                     {queue,    fun dummy_lookup/1}]),
+-compile(export_all).
+
+all() ->
+    [
+     {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+     {non_parallel_tests, [], [
+                               queue_coarse_test,
+                               connection_coarse_test,
+                               fine_stats_aggregation_time_test,
+                               fine_stats_aggregation_test
+                              ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    inets:start(),
+    Config.
+
+end_per_suite(Config) ->
+    Config.
+
+init_per_group(_, Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+                                                    {rmq_nodename_suffix, ?MODULE}
+                                                   ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+                                      rabbit_ct_broker_helpers:setup_steps() ++
+                                          rabbit_ct_client_helpers:setup_steps() ++
+                                          [fun rabbit_mgmt_test_util:reset_management_settings/1]).
+
+end_per_group(_, Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+                                         [fun rabbit_mgmt_test_util:reset_management_settings/1] ++
+                                         rabbit_ct_client_helpers:teardown_steps() ++
+                                             rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+    reset_management_settings(Config),
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    reset_management_settings(Config),
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+queue_coarse_test(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, queue_coarse_test1, [Config]).
+
+queue_coarse_test1(_Config) ->
+    rabbit_mgmt_event_collector:override_lookups([{exchange, fun dummy_lookup/1},
+                                                  {queue,    fun dummy_lookup/1}]),
     create_q(test, 0),
     create_q(test2, 0),
     stats_q(test, 0, 10),
@@ -51,10 +102,13 @@ queue_coarse_test() ->
     delete_q(test2, 0),
     assert_item(Exp(0), get_vhost(R)),
     assert_item(Exp(0), get_overview_q(R)),
-    rabbit_mgmt_db:reset_lookups(),
+    rabbit_mgmt_event_collector:reset_lookups(),
     ok.
 
-connection_coarse_test() ->
+connection_coarse_test(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, connection_coarse_test1, [Config]).
+
+connection_coarse_test1(_Config) ->
     create_conn(test, 0),
     create_conn(test2, 0),
     stats_conn(test, 0, 10),
@@ -68,9 +122,14 @@ connection_coarse_test() ->
     assert_list([], rabbit_mgmt_db:get_all_connections(R)),
     ok.
 
-fine_stats_aggregation_test() ->
-    rabbit_mgmt_db:override_lookups([{exchange, fun dummy_lookup/1},
-                                     {queue,    fun dummy_lookup/1}]),
+fine_stats_aggregation_test(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, fine_stats_aggregation_test1, [Config]).
+
+fine_stats_aggregation_test1(_Config) ->
+    application:set_env(rabbitmq_management, rates_mode, detailed),
+    restart_mgmt_db(),
+    rabbit_mgmt_event_collector:override_lookups([{exchange, fun dummy_lookup/1},
+                                                  {queue,    fun dummy_lookup/1}]),
     create_ch(ch1, 0),
     create_ch(ch2, 0),
     stats_ch(ch1, 0, [{x, 100}], [{q1, x, 100},
@@ -81,9 +140,11 @@ fine_stats_aggregation_test() ->
     fine_stats_aggregation_test0(true),
     delete_q(q2, 0),
     fine_stats_aggregation_test0(false),
+    delete_q(q1, 0),
     delete_ch(ch1, 1),
     delete_ch(ch2, 1),
-    rabbit_mgmt_db:reset_lookups(),
+    application:set_env(rabbitmq_management, rates_mode, basic),
+    restart_mgmt_db(),
     ok.
 
 fine_stats_aggregation_test0(Q2Exists) ->
@@ -126,9 +187,12 @@ fine_stats_aggregation_test0(Q2Exists) ->
     end,
     ok.
 
-fine_stats_aggregation_time_test() ->
-    rabbit_mgmt_db:override_lookups([{exchange, fun dummy_lookup/1},
-                                     {queue,    fun dummy_lookup/1}]),
+fine_stats_aggregation_time_test(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, fine_stats_aggregation_time_test1, [Config]).
+
+fine_stats_aggregation_time_test1(_Config) ->
+    rabbit_mgmt_event_collector:override_lookups([{exchange, fun dummy_lookup/1},
+                                                  {queue,    fun dummy_lookup/1}]),
     create_ch(ch, 0),
     stats_ch(ch, 0, [{x, 100}], [{q, x, 50}], [{q, 20}]),
     stats_ch(ch, 5, [{x, 110}], [{q, x, 55}], [{q, 22}]),
@@ -145,7 +209,7 @@ fine_stats_aggregation_time_test() ->
 
     delete_q(q, 0),
     delete_ch(ch, 1),
-    rabbit_mgmt_db:reset_lookups(),
+    rabbit_mgmt_event_collector:reset_lookups(),
     ok.
 
 assert_fine_stats(m, Type, N, Obj, R) ->
@@ -204,7 +268,7 @@ delete_ch(Name, Timestamp) ->
     event(channel_closed, [{pid, pid_del(Name)}], Timestamp).
 
 event(Type, Stats, Timestamp) ->
-    ok = gen_server:call(rabbit_mgmt_db,
+    ok = gen_server:call(rabbit_mgmt_event_collector,
                          {event, #event{type      = Type,
                                         props     = Stats,
                                         reference = none,
@@ -292,3 +356,7 @@ pid_del(Name) ->
 a2b(A) -> list_to_binary(atom_to_list(A)).
 
 dummy_lookup(_Thing) -> {ok, ignore_this}.
+
+restart_mgmt_db() ->
+    supervisor2:terminate_child(rabbit_mgmt_sup_sup, rabbit_mgmt_sup),
+    rabbit_mgmt_sup_sup:start_child().
similarity index 71%
rename from rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_unit.erl
rename to rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_test_unit_SUITE.erl
index 6fb988785cd5b35df365cd75e2b651bed1b987d8..8f49f37be14d5040f44427ca7f27fd68f867a043 100644 (file)
 %%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_mgmt_test_unit).
+-module(rabbit_mgmt_test_unit_SUITE).
 
+-include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 
-tokenise_test() ->
+-compile(export_all).
+
+all() ->
+    [
+     {group, parallel_tests}
+    ].
+
+groups() ->
+    [
+     {parallel_tests, [parallel], [
+                                   tokenise_test,
+                                   pack_binding_test,
+                                   amqp_table_test
+                                  ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+tokenise_test(_Config) ->
     [] = rabbit_mgmt_format:tokenise(""),
     ["foo"] = rabbit_mgmt_format:tokenise("foo"),
     ["foo", "bar"] = rabbit_mgmt_format:tokenise("foo~bar"),
     ["foo", "", "bar"] = rabbit_mgmt_format:tokenise("foo~~bar"),
     ok.
 
-pack_binding_test() ->
+pack_binding_test(_Config) ->
     assert_binding(<<"~">>,
                    <<"">>, []),
     assert_binding(<<"foo">>,
@@ -36,7 +67,7 @@ pack_binding_test() ->
                    <<"foo~bar~bash">>, []),
     ok.
 
-amqp_table_test() ->
+amqp_table_test(_Config) ->
     assert_table({struct, []}, []),
     assert_table({struct, [{<<"x-expires">>, 1000}]},
                  [{<<"x-expires">>, long, 1000}]),
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_test_util.erl b/rabbitmq-server/deps/rabbitmq_management/test/rabbit_mgmt_test_util.erl
new file mode 100644 (file)
index 0000000..a5be762
--- /dev/null
@@ -0,0 +1,192 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ Management Console.
+%%
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2010-2012 GoPivotal, Inc.  All rights reserved.
+%%
+
+-module(rabbit_mgmt_test_util).
+
+-include("include/rabbit_mgmt_test.hrl").
+
+-compile(export_all).
+
+reset_management_settings(Config) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, process_stats_gc_timeout, 300000]),
+    rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_management, collect_statistics_interval, 5000]),
+    Config.
+
+http_get(Config, Path) ->
+    http_get(Config, Path, ?OK).
+
+http_get(Config, Path, CodeExp) ->
+    http_get(Config, Path, "guest", "guest", CodeExp).
+
+http_get(Config, Path, User, Pass, CodeExp) ->
+    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+        req(Config, get, Path, [auth_header(User, Pass)]),
+    assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
+    decode(CodeExp, Headers, ResBody).
+
+http_put(Config, Path, List, CodeExp) ->
+    http_put_raw(Config, Path, format_for_upload(List), CodeExp).
+
+http_put(Config, Path, List, User, Pass, CodeExp) ->
+    http_put_raw(Config, Path, format_for_upload(List), User, Pass, CodeExp).
+
+http_post(Config, Path, List, CodeExp) ->
+    http_post_raw(Config, Path, format_for_upload(List), CodeExp).
+
+http_post(Config, Path, List, User, Pass, CodeExp) ->
+    http_post_raw(Config, Path, format_for_upload(List), User, Pass, CodeExp).
+
+http_post_accept_json(Config, Path, List, CodeExp) ->
+    http_post_accept_json(Config, Path, List, "guest", "guest", CodeExp).
+
+http_post_accept_json(Config, Path, List, User, Pass, CodeExp) ->
+    http_post_raw(Config, Path, format_for_upload(List), User, Pass, CodeExp,
+                 [{"Accept", "application/json"}]).
+
+req(Config, Type, Path, Headers) ->
+    httpc:request(Type, {uri_base_from(Config) ++ Path, Headers}, ?HTTPC_OPTS, []).
+
+req(Config, Type, Path, Headers, Body) ->
+    httpc:request(Type, {uri_base_from(Config) ++ Path, Headers, "application/json", Body},
+                  ?HTTPC_OPTS, []).
+
+uri_base_from(Config) ->
+    binary_to_list(
+      rabbit_mgmt_format:print(
+        "http://localhost:~w/api",
+        [mgmt_port(Config)])).
+
+auth_header(Username, Password) ->
+    {"Authorization",
+     "Basic " ++ binary_to_list(base64:encode(Username ++ ":" ++ Password))}.
+
+amqp_port(Config) ->
+    config_port(Config, tcp_port_amqp).
+
+mgmt_port(Config) ->
+    config_port(Config, tcp_port_mgmt).
+
+config_port(Config, PortKey) ->
+    rabbit_ct_broker_helpers:get_node_config(Config, 0, PortKey).
+
+http_put_raw(Config, Path, Body, CodeExp) ->
+    http_upload_raw(Config, put, Path, Body, "guest", "guest", CodeExp, []).
+
+http_put_raw(Config, Path, Body, User, Pass, CodeExp) ->
+    http_upload_raw(Config, put, Path, Body, User, Pass, CodeExp, []).
+
+
+http_post_raw(Config, Path, Body, CodeExp) ->
+    http_upload_raw(Config, post, Path, Body, "guest", "guest", CodeExp, []).
+
+http_post_raw(Config, Path, Body, User, Pass, CodeExp) ->
+    http_upload_raw(Config, post, Path, Body, User, Pass, CodeExp, []).
+
+http_post_raw(Config, Path, Body, User, Pass, CodeExp, MoreHeaders) ->
+    http_upload_raw(Config, post, Path, Body, User, Pass, CodeExp, MoreHeaders).
+
+
+http_upload_raw(Config, Type, Path, Body, User, Pass, CodeExp, MoreHeaders) ->
+    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+       req(Config, Type, Path, [auth_header(User, Pass)] ++ MoreHeaders, Body),
+    assert_code(CodeExp, CodeAct, Type, Path, ResBody),
+    decode(CodeExp, Headers, ResBody).
+
+http_delete(Config, Path, CodeExp) ->
+    http_delete(Config, Path, "guest", "guest", CodeExp).
+
+http_delete(Config, Path, User, Pass, CodeExp) ->
+    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+        req(Config, delete, Path, [auth_header(User, Pass)]),
+    assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody),
+    decode(CodeExp, Headers, ResBody).
+
+format_for_upload(none) ->
+    <<"">>;
+format_for_upload(List) ->
+    iolist_to_binary(mochijson2:encode({struct, List})).
+
+assert_code(CodesExpected, CodeAct, Type, Path, Body) when is_list(CodesExpected) ->
+    case lists:member(CodeAct, CodesExpected) of
+        true ->
+            ok;
+        false ->
+            throw({expected, CodesExpected, got, CodeAct, type, Type,
+                   path, Path, body, Body})
+    end;
+assert_code(CodeExp, CodeAct, Type, Path, Body) ->
+    case CodeExp of
+        CodeAct -> ok;
+        _       -> throw({expected, CodeExp, got, CodeAct, type, Type,
+                          path, Path, body, Body})
+    end.
+
+decode(?OK, _Headers,  ResBody) -> cleanup(mochijson2:decode(ResBody));
+decode(_,    Headers, _ResBody) -> Headers.
+
+cleanup(L) when is_list(L) ->
+    [cleanup(I) || I <- L];
+cleanup({struct, I}) ->
+    cleanup(I);
+cleanup({K, V}) when is_binary(K) ->
+    {list_to_atom(binary_to_list(K)), cleanup(V)};
+cleanup(I) ->
+    I.
+
+assert_list(Exp, Act) ->
+    case length(Exp) == length(Act) of
+        true  -> ok;
+        false -> throw({expected, Exp, actual, Act})
+    end,
+    [case length(lists:filter(fun(ActI) -> test_item(ExpI, ActI) end, Act)) of
+         1 -> ok;
+         N -> throw({found, N, ExpI, in, Act})
+     end || ExpI <- Exp].
+
+assert_item(Exp, Act) ->
+    case test_item0(Exp, Act) of
+        [] -> ok;
+        Or -> throw(Or)
+    end.
+
+test_item(Exp, Act) ->
+    case test_item0(Exp, Act) of
+        [] -> true;
+        _  -> false
+    end.
+
+test_item0(Exp, Act) ->
+    [{did_not_find, ExpI, in, Act} || ExpI <- Exp,
+                                      not lists:member(ExpI, Act)].
+
+assert_keys(Exp, Act) ->
+    case test_key0(Exp, Act) of
+        [] -> ok;
+        Or -> throw(Or)
+    end.
+
+test_key0(Exp, Act) ->
+    [{did_not_find, ExpI, in, Act} || ExpI <- Exp,
+                                      not proplists:is_defined(ExpI, Act)].
+assert_no_keys(NotExp, Act) ->
+    case test_no_key0(NotExp, Act) of
+        [] -> ok;
+        Or -> throw(Or)
+    end.
+
+test_no_key0(Exp, Act) ->
+    [{invalid_key, ExpI, in, Act} || ExpI <- Exp,
+                                      proplists:is_defined(ExpI, Act)].
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/src/default-config b/rabbitmq-server/deps/rabbitmq_management/test/src/default-config
deleted file mode 100644 (file)
index b76eba8..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-# rabbitmqadmin.conf.example START
-
-[non_default]
-hostname = localhost
-port = 25672
-username = guest
-password = guest
-declare_vhost = / # Used as default for declare / delete only
-vhost = /         # Used as default for declare / delete / list
-
-[bad_host]
-hostname = rabbit.acme.com
-port = 15672
-username = guest
-password = guest
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_clustering.erl b/rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_clustering.erl
deleted file mode 100644 (file)
index 642b427..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
-%%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
-%%
-%%   The Original Code is RabbitMQ Management Console.
-%%
-%%   The Initial Developers of the Original Code are Rabbit Technologies Ltd.
-%%
-%%   Copyright (C) 2010 Rabbit Technologies Ltd.
-%%
-%%   All Rights Reserved.
-%%
-%%   Contributor(s): ______________________________________.
-%%
--module(rabbit_mgmt_test_clustering).
-
--compile(export_all).
--include("rabbit_mgmt_test.hrl").
-
--import(rabbit_mgmt_test_http, [http_get/1, http_put/3, http_delete/2]).
--import(rabbit_misc, [pget/2]).
-
-%%----------------------------------------------------------------------------
-
-cluster_nodes_with() -> cluster_ab.
-cluster_nodes([_A, _B]) ->
-    ?assertEqual(2, length(http_get("/nodes"))),
-    ok.
-
-ha_with() -> cluster_ab.
-ha([RabbitCfg, HareCfg]) ->
-    Rabbit = pget(nodename, RabbitCfg),
-    Hare = pget(nodename, HareCfg),
-    Policy = [{pattern,    <<".*">>},
-              {definition, [{'ha-mode', <<"all">>}]}],
-    http_put("/policies/%2f/HA", Policy, ?NO_CONTENT),
-    QArgs = [{node, list_to_binary(atom_to_list(Hare))}],
-    http_put("/queues/%2f/ha-queue", QArgs, ?NO_CONTENT),
-    Q = wait_for("/queues/%2f/ha-queue"),
-    assert_node(Hare, pget(node, Q)),
-    assert_single_node(Rabbit, pget(slave_nodes, Q)),
-    assert_single_node(Rabbit, pget(synchronised_slave_nodes, Q)),
-    _HareCfg2 = rabbit_test_configs:restart_node(HareCfg),
-
-    Q2 = wait_for("/queues/%2f/ha-queue"),
-    assert_node(Rabbit, pget(node, Q2)),
-    assert_single_node(Hare, pget(slave_nodes, Q2)),
-    assert_single_node(Hare, pget(synchronised_slave_nodes, Q2)),
-    http_delete("/queues/%2f/ha-queue", ?NO_CONTENT),
-    http_delete("/policies/%2f/HA", ?NO_CONTENT),
-    ok.
-
-%%----------------------------------------------------------------------------
-
-wait_for(Path) ->
-    wait_for(Path, [slave_nodes, synchronised_slave_nodes]).
-
-wait_for(Path, Keys) ->
-    wait_for(Path, Keys, 1000).
-
-wait_for(Path, Keys, 0) ->
-    exit({timeout, {Path, Keys}});
-
-wait_for(Path, Keys, Count) ->
-    Res = http_get(Path),
-    case present(Keys, Res) of
-        false -> timer:sleep(10),
-                 wait_for(Path, Keys, Count - 1);
-        true  -> Res
-    end.
-
-present(Keys, Res) ->
-    lists:all(fun (Key) ->
-                      X = pget(Key, Res),
-                      X =/= [] andalso X =/= undefined
-              end, Keys).
-
-assert_single_node(Exp, Act) ->
-    ?assertEqual(1, length(Act)),
-    assert_node(Exp, hd(Act)).
-
-assert_nodes(Exp, Act0) ->
-    Act = [read_node(A) || A <- Act0],
-    ?debugVal({Exp, Act}),
-    ?assertEqual(length(Exp), length(Act)),
-    [?assert(lists:member(E, Act)) || E <- Exp].
-
-assert_node(Exp, Act) ->
-    ?assertEqual(Exp, read_node(Act)).
-
-read_node(N) ->
-    list_to_atom(hd(string:tokens(binary_to_list(N), "@"))).
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_db_unit.erl b/rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_db_unit.erl
deleted file mode 100644 (file)
index 80af615..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
-%%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
-%%
-%%   The Original Code is RabbitMQ Management Console.
-%%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2012 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(rabbit_mgmt_test_db_unit).
-
--include("rabbit_mgmt.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-gc_test() ->
-    T = fun (Before, After) ->
-                ?assertEqual(After, unstats(
-                                      rabbit_mgmt_stats:gc(
-                                        cutoff(), stats(Before))))
-        end,
-    %% Cut off old sample, move to base
-    T({[{8999, 123}, {9000, 456}], 0},
-      {[{9000, 456}], 123}),
-    %% Amalgamate old samples to rounder one
-    T({[{9001, 100}, {9010, 020}, {10000, 003}], 0},
-      {[{10000, 123}], 0}),
-    %% The same, but a bit less
-    T({[{9000, 100}, {9901, 020}, {9910, 003}], 0},
-      {[{9000, 100}, {9910, 023}], 0}),
-    %% Nothing needs to be done
-    T({[{9000, 100}, {9990, 020}, {9991, 003}], 0},
-      {[{9000, 100}, {9990, 020}, {9991, 003}], 0}),
-    %% Invent a newer sample that's acceptable
-    T({[{9001, 10}, {9010, 02}], 0},
-      {[{9100, 12}], 0}),
-    %% ...but don't if it's too old
-    T({[{8001, 10}, {8010, 02}], 0},
-      {[], 12}),
-    ok.
-
-format_test() ->
-    Interval = 10,
-    T = fun ({First, Last, Incr}, Stats, Results) ->
-                ?assertEqual(format(Results),
-                             rabbit_mgmt_stats:format(
-                               #range{first = First * 1000,
-                                      last  = Last * 1000,
-                                      incr  = Incr * 1000},
-                               stats(Stats),
-                               Interval * 1000))
-        end,
-
-    %% Just three samples, all of which we format. Note the
-    %% instantaneous rate is taken from the penultimate sample.
-    T({10, 30, 10}, {[{10, 10}, {20, 20}, {30, 30}], 1},
-      {[{30, 61}, {20, 31}, {10, 11}], 2.0, 2.5, 103/3, 61}),
-
-    %% Skip over the second (and ditto).
-    T({10, 30, 20}, {[{10, 10}, {20, 20}, {30, 30}], 1},
-      {[{30, 61}, {10, 11}], 2.0, 2.5, 36.0, 61}),
-
-    %% Skip over some and invent some. Note that the instantaneous
-    %% rate drops to 0 since the last event is now in the past.
-    T({0, 40, 20}, {[{10, 10}, {20, 20}, {30, 30}], 1},
-      {[{40, 61}, {20, 31}, {0, 1}], 0.0, 1.5, 31.0, 61}),
-
-    %% And a case where the range starts after the samples
-    T({20, 40, 10}, {[{10, 10}, {20, 20}, {30, 30}], 1},
-      {[{40, 61}, {30, 61}, {20, 31}], 0.0, 1.5, 51.0, 61}),
-
-    %% A single sample - which should lead to some bits not getting generated
-    T({10, 10, 10}, {[{10, 10}, {20, 20}, {30, 30}], 1},
-      {[{10, 11}], 0.0, 11}),
-
-    %% No samples - which should also lead to some bits not getting generated
-    T({10, 0, 10}, {[{10, 10}, {20, 20}, {30, 30}], 1},
-      {[], 0.0, 1}),
-
-    %% TODO more?
-    ok.
-
-format_no_range_test() ->
-    Interval = 10,
-    T = fun (Stats, Results) ->
-                ?assertEqual(format(Results),
-                             rabbit_mgmt_stats:format(
-                               no_range, stats(Stats), Interval * 1000))
-        end,
-
-    %% Just three samples
-    T({[{10, 10}, {20, 20}, {30, 30}], 1},
-      {0.0, 61}),
-    ok.
-
-
-%%--------------------------------------------------------------------
-
-cutoff() ->
-    {[{10, 1}, {100, 10}, {1000, 100}], %% Sec
-     10000000}. %% Millis
-
-stats({Diffs, Base}) ->
-    #stats{diffs = gb_trees:from_orddict(secs_to_millis(Diffs)), base = Base}.
-
-unstats(#stats{diffs = Diffs, base = Base}) ->
-    {millis_to_secs(gb_trees:to_list(Diffs)), Base}.
-
-secs_to_millis(L) -> [{TS * 1000, S} || {TS, S} <- L].
-millis_to_secs(L) -> [{TS div 1000, S} || {TS, S} <- L].
-
-format({Rate, Count}) ->
-    {[{rate,     Rate}],
-     Count};
-
-format({Samples, Rate, Count}) ->
-    {[{rate,     Rate},
-      {samples,  format_samples(Samples)}],
-     Count};
-
-format({Samples, Rate, AvgRate, Avg, Count}) ->
-    {[{rate,     Rate},
-      {samples,  format_samples(Samples)},
-      {avg_rate, AvgRate},
-      {avg,      Avg}],
-     Count}.
-
-format_samples(Samples) ->
-    [[{sample, S}, {timestamp, TS * 1000}] || {TS, S} <- Samples].
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_http.erl b/rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_http.erl
deleted file mode 100644 (file)
index 0b2eb53..0000000
+++ /dev/null
@@ -1,1860 +0,0 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
-%%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
-%%
-%%   The Original Code is RabbitMQ Management Console.
-%%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_mgmt_test_http).
-
--include("rabbit_mgmt_test.hrl").
-
--export([http_get/1, http_put/3, http_delete/2]).
-
--import(rabbit_mgmt_test_util, [assert_list/2, assert_item/2, test_item/2]).
--import(rabbit_misc, [pget/2]).
-
-overview_test() ->
-    %% Rather crude, but this req doesn't say much and at least this means it
-    %% didn't blow up.
-    true = 0 < length(pget(listeners, http_get("/overview"))),
-    http_put("/users/myuser", [{password, <<"myuser">>},
-                               {tags,     <<"management">>}], [?CREATED, ?NO_CONTENT]),
-    http_get("/overview", "myuser", "myuser", ?OK),
-    http_delete("/users/myuser", ?NO_CONTENT),
-    %% TODO uncomment when priv works in test
-    %%http_get(""),
-    ok.
-
-cluster_name_test() ->
-    http_put("/users/myuser", [{password, <<"myuser">>},
-                               {tags,     <<"management">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/cluster-name", [{name, "foo"}], "myuser", "myuser", ?NOT_AUTHORISED),
-    http_put("/cluster-name", [{name, "foo"}], ?NO_CONTENT),
-    [{name, "foo"}] = http_get("/cluster-name", "myuser", "myuser", ?OK),
-    http_delete("/users/myuser", ?NO_CONTENT),
-    ok.
-
-nodes_test() ->
-    http_put("/users/user", [{password, <<"user">>},
-                             {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/users/monitor", [{password, <<"monitor">>},
-                                {tags, <<"monitoring">>}], [?CREATED, ?NO_CONTENT]),
-    DiscNode = [{type, <<"disc">>}, {running, true}],
-    assert_list([DiscNode], http_get("/nodes")),
-    assert_list([DiscNode], http_get("/nodes", "monitor", "monitor", ?OK)),
-    http_get("/nodes", "user", "user", ?NOT_AUTHORISED),
-    [Node] = http_get("/nodes"),
-    Path = "/nodes/" ++ binary_to_list(pget(name, Node)),
-    assert_item(DiscNode, http_get(Path, ?OK)),
-    assert_item(DiscNode, http_get(Path, "monitor", "monitor", ?OK)),
-    http_get(Path, "user", "user", ?NOT_AUTHORISED),
-    http_delete("/users/user", ?NO_CONTENT),
-    http_delete("/users/monitor", ?NO_CONTENT),
-    ok.
-
-auth_test() ->
-    http_put("/users/user", [{password, <<"user">>},
-                             {tags, <<"">>}], [?CREATED, ?NO_CONTENT]),
-    test_auth(?NOT_AUTHORISED, []),
-    test_auth(?NOT_AUTHORISED, [auth_header("user", "user")]),
-    test_auth(?NOT_AUTHORISED, [auth_header("guest", "gust")]),
-    test_auth(?OK, [auth_header("guest", "guest")]),
-    http_delete("/users/user", ?NO_CONTENT),
-    ok.
-
-%% This test is rather over-verbose as we're trying to test understanding of
-%% Webmachine
-vhosts_test() ->
-    assert_list([[{name, <<"/">>}]], http_get("/vhosts")),
-    %% Create a new one
-    http_put("/vhosts/myvhost", none, [?CREATED, ?NO_CONTENT]),
-    %% PUT should be idempotent
-    http_put("/vhosts/myvhost", none, ?NO_CONTENT),
-    %% Check it's there
-    assert_list([[{name, <<"/">>}], [{name, <<"myvhost">>}]],
-                http_get("/vhosts")),
-    %% Check individually
-    assert_item([{name, <<"/">>}], http_get("/vhosts/%2f", ?OK)),
-    assert_item([{name, <<"myvhost">>}],http_get("/vhosts/myvhost")),
-    %% Delete it
-    http_delete("/vhosts/myvhost", ?NO_CONTENT),
-    %% It's not there
-    http_get("/vhosts/myvhost", ?NOT_FOUND),
-    http_delete("/vhosts/myvhost", ?NOT_FOUND).
-
-vhosts_trace_test() ->
-    http_put("/vhosts/myvhost", none, [?CREATED, ?NO_CONTENT]),
-    Disabled = [{name,  <<"myvhost">>}, {tracing, false}],
-    Enabled  = [{name,  <<"myvhost">>}, {tracing, true}],
-    Disabled = http_get("/vhosts/myvhost"),
-    http_put("/vhosts/myvhost", [{tracing, true}], ?NO_CONTENT),
-    Enabled = http_get("/vhosts/myvhost"),
-    http_put("/vhosts/myvhost", [{tracing, true}], ?NO_CONTENT),
-    Enabled = http_get("/vhosts/myvhost"),
-    http_put("/vhosts/myvhost", [{tracing, false}], ?NO_CONTENT),
-    Disabled = http_get("/vhosts/myvhost"),
-    http_delete("/vhosts/myvhost", ?NO_CONTENT).
-
-users_test() ->
-    assert_item([{name, <<"guest">>}, {tags, <<"administrator">>}],
-                http_get("/whoami")),
-    http_get("/users/myuser", ?NOT_FOUND),
-    http_put_raw("/users/myuser", "Something not JSON", ?BAD_REQUEST),
-    http_put("/users/myuser", [{flim, <<"flam">>}], ?BAD_REQUEST),
-    http_put("/users/myuser", [{tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/users/myuser", [{password_hash, <<"not_hash">>}], ?BAD_REQUEST),
-    http_put("/users/myuser", [{password_hash,
-                                <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
-                               {tags, <<"management">>}], ?NO_CONTENT),
-    assert_item([{name, <<"myuser">>}, {tags, <<"management">>},
-                 {password_hash, <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
-                 {hashing_algorithm, <<"rabbit_password_hashing_sha256">>}],
-                http_get("/users/myuser")),
-
-    http_put("/users/myuser", [{password_hash,
-                                <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
-                               {hashing_algorithm, <<"rabbit_password_hashing_md5">>},
-                               {tags, <<"management">>}], ?NO_CONTENT),
-    assert_item([{name, <<"myuser">>}, {tags, <<"management">>},
-                 {password_hash, <<"IECV6PZI/Invh0DL187KFpkO5Jc=">>},
-                 {hashing_algorithm, <<"rabbit_password_hashing_md5">>}],
-                http_get("/users/myuser")),
-    http_put("/users/myuser", [{password, <<"password">>},
-                               {tags, <<"administrator, foo">>}], ?NO_CONTENT),
-    assert_item([{name, <<"myuser">>}, {tags, <<"administrator,foo">>}],
-                http_get("/users/myuser")),
-    assert_list([[{name, <<"myuser">>}, {tags, <<"administrator,foo">>}],
-                 [{name, <<"guest">>}, {tags, <<"administrator">>}]],
-                http_get("/users")),
-    test_auth(?OK, [auth_header("myuser", "password")]),
-    http_delete("/users/myuser", ?NO_CONTENT),
-    test_auth(?NOT_AUTHORISED, [auth_header("myuser", "password")]),
-    http_get("/users/myuser", ?NOT_FOUND),
-    ok.
-
-users_legacy_administrator_test() ->
-    http_put("/users/myuser1", [{administrator, <<"true">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/users/myuser2", [{administrator, <<"false">>}], [?CREATED, ?NO_CONTENT]),
-    assert_item([{name, <<"myuser1">>}, {tags, <<"administrator">>}],
-                http_get("/users/myuser1")),
-    assert_item([{name, <<"myuser2">>}, {tags, <<"">>}],
-                http_get("/users/myuser2")),
-    http_delete("/users/myuser1", ?NO_CONTENT),
-    http_delete("/users/myuser2", ?NO_CONTENT),
-    ok.
-
-permissions_validation_test() ->
-    Good = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
-    http_put("/permissions/wrong/guest", Good, ?BAD_REQUEST),
-    http_put("/permissions/%2f/wrong", Good, ?BAD_REQUEST),
-    http_put("/permissions/%2f/guest",
-             [{configure, <<"[">>}, {write, <<".*">>}, {read, <<".*">>}],
-             ?BAD_REQUEST),
-    http_put("/permissions/%2f/guest", Good, ?NO_CONTENT),
-    ok.
-
-permissions_list_test() ->
-    [[{user,<<"guest">>},
-      {vhost,<<"/">>},
-      {configure,<<".*">>},
-      {write,<<".*">>},
-      {read,<<".*">>}]] =
-        http_get("/permissions"),
-
-    http_put("/users/myuser1", [{password, <<"">>}, {tags, <<"administrator">>}],
-             [?CREATED, ?NO_CONTENT]),
-    http_put("/users/myuser2", [{password, <<"">>}, {tags, <<"administrator">>}],
-             [?CREATED, ?NO_CONTENT]),
-    http_put("/vhosts/myvhost1", none, [?CREATED, ?NO_CONTENT]),
-    http_put("/vhosts/myvhost2", none, [?CREATED, ?NO_CONTENT]),
-
-    Perms = [{configure, <<"foo">>}, {write, <<"foo">>}, {read, <<"foo">>}],
-    http_put("/permissions/myvhost1/myuser1", Perms, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/myvhost2/myuser1", Perms, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/myvhost1/myuser2", Perms, [?CREATED, ?NO_CONTENT]),
-
-    4 = length(http_get("/permissions")),
-    2 = length(http_get("/users/myuser1/permissions")),
-    1 = length(http_get("/users/myuser2/permissions")),
-
-    http_get("/users/notmyuser/permissions", ?NOT_FOUND),
-    http_get("/vhosts/notmyvhost/permissions", ?NOT_FOUND),
-
-    http_delete("/users/myuser1", ?NO_CONTENT),
-    http_delete("/users/myuser2", ?NO_CONTENT),
-    http_delete("/vhosts/myvhost1", ?NO_CONTENT),
-    http_delete("/vhosts/myvhost2", ?NO_CONTENT),
-    ok.
-
-permissions_test() ->
-    http_put("/users/myuser", [{password, <<"myuser">>}, {tags, <<"administrator">>}],
-             [?CREATED, ?NO_CONTENT]),
-    http_put("/vhosts/myvhost", none, [?CREATED, ?NO_CONTENT]),
-
-    http_put("/permissions/myvhost/myuser",
-             [{configure, <<"foo">>}, {write, <<"foo">>}, {read, <<"foo">>}],
-             [?CREATED, ?NO_CONTENT]),
-
-    Permission = [{user,<<"myuser">>},
-                  {vhost,<<"myvhost">>},
-                  {configure,<<"foo">>},
-                  {write,<<"foo">>},
-                  {read,<<"foo">>}],
-    Default = [{user,<<"guest">>},
-               {vhost,<<"/">>},
-               {configure,<<".*">>},
-               {write,<<".*">>},
-               {read,<<".*">>}],
-    Permission = http_get("/permissions/myvhost/myuser"),
-    assert_list([Permission, Default], http_get("/permissions")),
-    assert_list([Permission], http_get("/users/myuser/permissions")),
-    http_delete("/permissions/myvhost/myuser", ?NO_CONTENT),
-    http_get("/permissions/myvhost/myuser", ?NOT_FOUND),
-
-    http_delete("/users/myuser", ?NO_CONTENT),
-    http_delete("/vhosts/myvhost", ?NO_CONTENT),
-    ok.
-
-connections_test() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    LocalPort = local_port(Conn),
-    Path = binary_to_list(
-             rabbit_mgmt_format:print(
-               "/connections/127.0.0.1%3A~w%20->%20127.0.0.1%3A5672",
-               [LocalPort])),
-    http_get(Path, ?OK),
-    http_delete(Path, ?NO_CONTENT),
-    %% TODO rabbit_reader:shutdown/2 returns before the connection is
-    %% closed. It may not be worth fixing.
-    timer:sleep(200),
-    http_get(Path, ?NOT_FOUND).
-
-test_auth(Code, Headers) ->
-    {ok, {{_, Code, _}, _, _}} = req(get, "/overview", Headers).
-
-exchanges_test() ->
-    %% Can pass booleans or strings
-    Good = [{type, <<"direct">>}, {durable, <<"true">>}],
-    http_put("/vhosts/myvhost", none, [?CREATED, ?NO_CONTENT]),
-    http_get("/exchanges/myvhost/foo", ?NOT_AUTHORISED),
-    http_put("/exchanges/myvhost/foo", Good, ?NOT_AUTHORISED),
-    http_put("/permissions/myvhost/guest",
-             [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
-             [?CREATED, ?NO_CONTENT]),
-    http_get("/exchanges/myvhost/foo", ?NOT_FOUND),
-    http_put("/exchanges/myvhost/foo", Good, [?CREATED, ?NO_CONTENT]),
-    http_put("/exchanges/myvhost/foo", Good, ?NO_CONTENT),
-    http_get("/exchanges/%2f/foo", ?NOT_FOUND),
-    assert_item([{name,<<"foo">>},
-                 {vhost,<<"myvhost">>},
-                 {type,<<"direct">>},
-                 {durable,true},
-                 {auto_delete,false},
-                 {internal,false},
-                 {arguments,[]}],
-                http_get("/exchanges/myvhost/foo")),
-
-    http_put("/exchanges/badvhost/bar", Good, ?NOT_FOUND),
-    http_put("/exchanges/myvhost/bar", [{type, <<"bad_exchange_type">>}],
-             ?BAD_REQUEST),
-    http_put("/exchanges/myvhost/bar", [{type, <<"direct">>},
-                                        {durable, <<"troo">>}],
-             ?BAD_REQUEST),
-    http_put("/exchanges/myvhost/foo", [{type, <<"direct">>}],
-             ?BAD_REQUEST),
-
-    http_delete("/exchanges/myvhost/foo", ?NO_CONTENT),
-    http_delete("/exchanges/myvhost/foo", ?NOT_FOUND),
-
-    http_delete("/vhosts/myvhost", ?NO_CONTENT),
-    http_get("/exchanges/badvhost", ?NOT_FOUND),
-    ok.
-
-queues_test() ->
-    Good = [{durable, true}],
-    http_get("/queues/%2f/foo", ?NOT_FOUND),
-    http_put("/queues/%2f/foo", Good, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/foo", Good, ?NO_CONTENT),
-    http_get("/queues/%2f/foo", ?OK),
-
-    http_put("/queues/badvhost/bar", Good, ?NOT_FOUND),
-    http_put("/queues/%2f/bar",
-             [{durable, <<"troo">>}],
-             ?BAD_REQUEST),
-    http_put("/queues/%2f/foo",
-             [{durable, false}],
-             ?BAD_REQUEST),
-
-    http_put("/queues/%2f/baz", Good, [?CREATED, ?NO_CONTENT]),
-
-    Queues = http_get("/queues/%2f"),
-    Queue = http_get("/queues/%2f/foo"),
-    assert_list([[{name,        <<"foo">>},
-                  {vhost,       <<"/">>},
-                  {durable,     true},
-                  {auto_delete, false},
-                  {exclusive,   false},
-                  {arguments,   []}],
-                 [{name,        <<"baz">>},
-                  {vhost,       <<"/">>},
-                  {durable,     true},
-                  {auto_delete, false},
-                  {exclusive,   false},
-                  {arguments,   []}]], Queues),
-    assert_item([{name,        <<"foo">>},
-                 {vhost,       <<"/">>},
-                 {durable,     true},
-                 {auto_delete, false},
-                 {exclusive,   false},
-                 {arguments,   []}], Queue),
-
-    http_delete("/queues/%2f/foo", ?NO_CONTENT),
-    http_delete("/queues/%2f/baz", ?NO_CONTENT),
-    http_delete("/queues/%2f/foo", ?NOT_FOUND),
-    http_get("/queues/badvhost", ?NOT_FOUND),
-    ok.
-
-bindings_test() ->
-    XArgs = [{type, <<"direct">>}],
-    QArgs = [],
-    http_put("/exchanges/%2f/myexchange", XArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
-    BArgs = [{routing_key, <<"routing">>}, {arguments, []}],
-    http_post("/bindings/%2f/e/myexchange/q/myqueue", BArgs, [?CREATED, ?NO_CONTENT]),
-    http_get("/bindings/%2f/e/myexchange/q/myqueue/routing", ?OK),
-    http_get("/bindings/%2f/e/myexchange/q/myqueue/rooting", ?NOT_FOUND),
-    Binding =
-        [{source,<<"myexchange">>},
-         {vhost,<<"/">>},
-         {destination,<<"myqueue">>},
-         {destination_type,<<"queue">>},
-         {routing_key,<<"routing">>},
-         {arguments,[]},
-         {properties_key,<<"routing">>}],
-    DBinding =
-        [{source,<<"">>},
-         {vhost,<<"/">>},
-         {destination,<<"myqueue">>},
-         {destination_type,<<"queue">>},
-         {routing_key,<<"myqueue">>},
-         {arguments,[]},
-         {properties_key,<<"myqueue">>}],
-    Binding = http_get("/bindings/%2f/e/myexchange/q/myqueue/routing"),
-    assert_list([Binding],
-                http_get("/bindings/%2f/e/myexchange/q/myqueue")),
-    assert_list([Binding, DBinding],
-                http_get("/queues/%2f/myqueue/bindings")),
-    assert_list([Binding],
-                http_get("/exchanges/%2f/myexchange/bindings/source")),
-    http_delete("/bindings/%2f/e/myexchange/q/myqueue/routing", ?NO_CONTENT),
-    http_delete("/bindings/%2f/e/myexchange/q/myqueue/routing", ?NOT_FOUND),
-    http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
-    http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
-    http_get("/bindings/badvhost", ?NOT_FOUND),
-    http_get("/bindings/badvhost/myqueue/myexchange/routing", ?NOT_FOUND),
-    http_get("/bindings/%2f/e/myexchange/q/myqueue/routing", ?NOT_FOUND),
-    ok.
-
-bindings_post_test() ->
-    XArgs = [{type, <<"direct">>}],
-    QArgs = [],
-    BArgs = [{routing_key, <<"routing">>}, {arguments, [{foo, <<"bar">>}]}],
-    http_put("/exchanges/%2f/myexchange", XArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_post("/bindings/%2f/e/myexchange/q/badqueue", BArgs, ?NOT_FOUND),
-    http_post("/bindings/%2f/e/badexchange/q/myqueue", BArgs, ?NOT_FOUND),
-    Headers1 = http_post("/bindings/%2f/e/myexchange/q/myqueue", [], [?CREATED, ?NO_CONTENT]),
-    "../../../../%2F/e/myexchange/q/myqueue/~" = pget("location", Headers1),
-    Headers2 = http_post("/bindings/%2f/e/myexchange/q/myqueue", BArgs, [?CREATED, ?NO_CONTENT]),
-    PropertiesKey = "routing~V4mGFgnPNrdtRmluZIxTDA",
-    PropertiesKeyBin = list_to_binary(PropertiesKey),
-    "../../../../%2F/e/myexchange/q/myqueue/" ++ PropertiesKey =
-        pget("location", Headers2),
-    URI = "/bindings/%2F/e/myexchange/q/myqueue/" ++ PropertiesKey,
-    [{source,<<"myexchange">>},
-     {vhost,<<"/">>},
-     {destination,<<"myqueue">>},
-     {destination_type,<<"queue">>},
-     {routing_key,<<"routing">>},
-     {arguments,[{foo,<<"bar">>}]},
-     {properties_key,PropertiesKeyBin}] = http_get(URI, ?OK),
-    http_get(URI ++ "x", ?NOT_FOUND),
-    http_delete(URI, ?NO_CONTENT),
-    http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
-    http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
-    ok.
-
-bindings_e2e_test() ->
-    BArgs = [{routing_key, <<"routing">>}, {arguments, []}],
-    http_post("/bindings/%2f/e/amq.direct/e/badexchange", BArgs, ?NOT_FOUND),
-    http_post("/bindings/%2f/e/badexchange/e/amq.fanout", BArgs, ?NOT_FOUND),
-    Headers = http_post("/bindings/%2f/e/amq.direct/e/amq.fanout", BArgs, [?CREATED, ?NO_CONTENT]),
-    "../../../../%2F/e/amq.direct/e/amq.fanout/routing" =
-        pget("location", Headers),
-    [{source,<<"amq.direct">>},
-     {vhost,<<"/">>},
-     {destination,<<"amq.fanout">>},
-     {destination_type,<<"exchange">>},
-     {routing_key,<<"routing">>},
-     {arguments,[]},
-     {properties_key,<<"routing">>}] =
-        http_get("/bindings/%2f/e/amq.direct/e/amq.fanout/routing", ?OK),
-    http_delete("/bindings/%2f/e/amq.direct/e/amq.fanout/routing", ?NO_CONTENT),
-    http_post("/bindings/%2f/e/amq.direct/e/amq.headers", BArgs, [?CREATED, ?NO_CONTENT]),
-    Binding =
-        [{source,<<"amq.direct">>},
-         {vhost,<<"/">>},
-         {destination,<<"amq.headers">>},
-         {destination_type,<<"exchange">>},
-         {routing_key,<<"routing">>},
-         {arguments,[]},
-         {properties_key,<<"routing">>}],
-    Binding = http_get("/bindings/%2f/e/amq.direct/e/amq.headers/routing"),
-    assert_list([Binding],
-                http_get("/bindings/%2f/e/amq.direct/e/amq.headers")),
-    assert_list([Binding],
-                http_get("/exchanges/%2f/amq.direct/bindings/source")),
-    assert_list([Binding],
-                http_get("/exchanges/%2f/amq.headers/bindings/destination")),
-    http_delete("/bindings/%2f/e/amq.direct/e/amq.headers/routing", ?NO_CONTENT),
-    http_get("/bindings/%2f/e/amq.direct/e/amq.headers/rooting", ?NOT_FOUND),
-    ok.
-
-permissions_administrator_test() ->
-    http_put("/users/isadmin", [{password, <<"isadmin">>},
-                                {tags, <<"administrator">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/users/notadmin", [{password, <<"notadmin">>},
-                                 {tags, <<"administrator">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/users/notadmin", [{password, <<"notadmin">>},
-                                 {tags, <<"management">>}], ?NO_CONTENT),
-    Test =
-        fun(Path) ->
-                http_get(Path, "notadmin", "notadmin", ?NOT_AUTHORISED),
-                http_get(Path, "isadmin", "isadmin", ?OK),
-                http_get(Path, "guest", "guest", ?OK)
-        end,
-    %% All users can get a list of vhosts. It may be filtered.
-    %%Test("/vhosts"),
-    Test("/vhosts/%2f"),
-    Test("/vhosts/%2f/permissions"),
-    Test("/users"),
-    Test("/users/guest"),
-    Test("/users/guest/permissions"),
-    Test("/permissions"),
-    Test("/permissions/%2f/guest"),
-    http_delete("/users/notadmin", ?NO_CONTENT),
-    http_delete("/users/isadmin", ?NO_CONTENT),
-    ok.
-
-permissions_vhost_test() ->
-    QArgs = [],
-    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
-    http_put("/users/myuser", [{password, <<"myuser">>},
-                               {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/vhosts/myvhost1", none, [?CREATED, ?NO_CONTENT]),
-    http_put("/vhosts/myvhost2", none, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/myvhost1/myuser", PermArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/myvhost1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/myvhost2/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
-    assert_list([[{name, <<"/">>}],
-                 [{name, <<"myvhost1">>}],
-                 [{name, <<"myvhost2">>}]], http_get("/vhosts", ?OK)),
-    assert_list([[{name, <<"myvhost1">>}]],
-                http_get("/vhosts", "myuser", "myuser", ?OK)),
-    http_put("/queues/myvhost1/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/myvhost2/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
-    Test1 =
-        fun(Path) ->
-                Results = http_get(Path, "myuser", "myuser", ?OK),
-                [case pget(vhost, Result) of
-                     <<"myvhost2">> ->
-                         throw({got_result_from_vhost2_in, Path, Result});
-                     _ ->
-                         ok
-                 end || Result <- Results]
-        end,
-    Test2 =
-        fun(Path1, Path2) ->
-                http_get(Path1 ++ "/myvhost1/" ++ Path2, "myuser", "myuser",
-                         ?OK),
-                http_get(Path1 ++ "/myvhost2/" ++ Path2, "myuser", "myuser",
-                         ?NOT_AUTHORISED)
-        end,
-    Test1("/exchanges"),
-    Test2("/exchanges", ""),
-    Test2("/exchanges", "amq.direct"),
-    Test1("/queues"),
-    Test2("/queues", ""),
-    Test2("/queues", "myqueue"),
-    Test1("/bindings"),
-    Test2("/bindings", ""),
-    Test2("/queues", "myqueue/bindings"),
-    Test2("/exchanges", "amq.default/bindings/source"),
-    Test2("/exchanges", "amq.default/bindings/destination"),
-    Test2("/bindings", "e/amq.default/q/myqueue"),
-    Test2("/bindings", "e/amq.default/q/myqueue/myqueue"),
-    http_delete("/vhosts/myvhost1", ?NO_CONTENT),
-    http_delete("/vhosts/myvhost2", ?NO_CONTENT),
-    http_delete("/users/myuser", ?NO_CONTENT),
-    ok.
-
-permissions_amqp_test() ->
-    %% Just test that it works at all, not that it works in all possible cases.
-    QArgs = [],
-    PermArgs = [{configure, <<"foo.*">>}, {write, <<"foo.*">>},
-                {read,      <<"foo.*">>}],
-    http_put("/users/myuser", [{password, <<"myuser">>},
-                               {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/%2f/myuser", PermArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/bar-queue", QArgs, "myuser", "myuser",
-             ?NOT_AUTHORISED),
-    http_put("/queues/%2f/bar-queue", QArgs, "nonexistent", "nonexistent",
-             ?NOT_AUTHORISED),
-    http_delete("/users/myuser", ?NO_CONTENT),
-    ok.
-
-get_conn(Username, Password) ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{
-                                         username = list_to_binary(Username),
-                                         password = list_to_binary(Password)}),
-    LocalPort = local_port(Conn),
-    ConnPath = rabbit_misc:format(
-                 "/connections/127.0.0.1%3A~w%20->%20127.0.0.1%3A5672",
-                 [LocalPort]),
-    ChPath = rabbit_misc:format(
-               "/channels/127.0.0.1%3A~w%20->%20127.0.0.1%3A5672%20(1)",
-               [LocalPort]),
-    ConnChPath = rabbit_misc:format(
-                   "/connections/127.0.0.1%3A~w%20->%20127.0.0.1%3A5672/channels",
-                   [LocalPort]),
-    {Conn, ConnPath, ChPath, ConnChPath}.
-
-permissions_connection_channel_consumer_test() ->
-    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
-    http_put("/users/user", [{password, <<"user">>},
-                             {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/%2f/user", PermArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/users/monitor", [{password, <<"monitor">>},
-                                {tags, <<"monitoring">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/%2f/monitor", PermArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/test", [], [?CREATED, ?NO_CONTENT]),
-
-    {Conn1, UserConn, UserCh, UserConnCh} = get_conn("user", "user"),
-    {Conn2, MonConn, MonCh, MonConnCh} = get_conn("monitor", "monitor"),
-    {Conn3, AdmConn, AdmCh, AdmConnCh} = get_conn("guest", "guest"),
-    {ok, Ch1} = amqp_connection:open_channel(Conn1),
-    {ok, Ch2} = amqp_connection:open_channel(Conn2),
-    {ok, Ch3} = amqp_connection:open_channel(Conn3),
-    [amqp_channel:subscribe(
-       Ch, #'basic.consume'{queue = <<"test">>}, self()) ||
-        Ch <- [Ch1, Ch2, Ch3]],
-    AssertLength = fun (Path, User, Len) ->
-                           ?assertEqual(Len,
-                                        length(http_get(Path, User, User, ?OK)))
-                   end,
-    [begin
-         AssertLength(P, "user", 1),
-         AssertLength(P, "monitor", 3),
-         AssertLength(P, "guest", 3)
-     end || P <- ["/connections", "/channels", "/consumers", "/consumers/%2f"]],
-
-    AssertRead = fun(Path, UserStatus) ->
-                         http_get(Path, "user", "user", UserStatus),
-                         http_get(Path, "monitor", "monitor", ?OK),
-                         http_get(Path, ?OK)
-                 end,
-    AssertRead(UserConn, ?OK),
-    AssertRead(MonConn, ?NOT_AUTHORISED),
-    AssertRead(AdmConn, ?NOT_AUTHORISED),
-    AssertRead(UserCh, ?OK),
-    AssertRead(MonCh, ?NOT_AUTHORISED),
-    AssertRead(AdmCh, ?NOT_AUTHORISED),
-    AssertRead(UserConnCh, ?OK),
-    AssertRead(MonConnCh, ?NOT_AUTHORISED),
-    AssertRead(AdmConnCh, ?NOT_AUTHORISED),
-
-    AssertClose = fun(Path, User, Status) ->
-                          http_delete(Path, User, User, Status)
-                  end,
-    AssertClose(UserConn, "monitor", ?NOT_AUTHORISED),
-    AssertClose(MonConn, "user", ?NOT_AUTHORISED),
-    AssertClose(AdmConn, "guest", ?NO_CONTENT),
-    AssertClose(MonConn, "guest", ?NO_CONTENT),
-    AssertClose(UserConn, "user", ?NO_CONTENT),
-
-    http_delete("/users/user", ?NO_CONTENT),
-    http_delete("/users/monitor", ?NO_CONTENT),
-    http_get("/connections/foo", ?NOT_FOUND),
-    http_get("/channels/foo", ?NOT_FOUND),
-    http_delete("/queues/%2f/test", ?NO_CONTENT),
-    ok.
-
-
-
-
-consumers_test() ->
-    http_put("/queues/%2f/test", [], [?CREATED, ?NO_CONTENT]),
-    {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn("guest", "guest"),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    amqp_channel:subscribe(
-      Ch, #'basic.consume'{queue        = <<"test">>,
-                           no_ack       = false,
-                           consumer_tag = <<"my-ctag">> }, self()),
-    assert_list([[{exclusive,    false},
-                  {ack_required, true},
-                  {consumer_tag, <<"my-ctag">>}]], http_get("/consumers")),
-    amqp_connection:close(Conn),
-    http_delete("/queues/%2f/test", ?NO_CONTENT),
-    ok.
-
-defs(Key, URI, CreateMethod, Args) ->
-    defs(Key, URI, CreateMethod, Args,
-         fun(URI2) -> http_delete(URI2, ?NO_CONTENT) end).
-
-defs_v(Key, URI, CreateMethod, Args) ->
-    Rep1 = fun (S, S2) -> re:replace(S, "<vhost>", S2, [{return, list}]) end,
-    Rep2 = fun (L, V2) -> lists:keymap(fun (vhost) -> V2;
-                                           (V)     -> V end, 2, L) end,
-    %% Test against default vhost
-    defs(Key, Rep1(URI, "%2f"), CreateMethod, Rep2(Args, <<"/">>)),
-
-    %% Test against new vhost
-    http_put("/vhosts/test", none, [?CREATED, ?NO_CONTENT]),
-    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
-    http_put("/permissions/test/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
-    defs(Key, Rep1(URI, "test"), CreateMethod, Rep2(Args, <<"test">>),
-         fun(URI2) -> http_delete(URI2, ?NO_CONTENT),
-                      http_delete("/vhosts/test", ?NO_CONTENT) end).
-
-create(CreateMethod, URI, Args) ->
-    case CreateMethod of
-        put        -> http_put(URI, Args, [?CREATED, ?NO_CONTENT]),
-                      URI;
-        put_update -> http_put(URI, Args, ?NO_CONTENT),
-                      URI;
-        post       -> Headers = http_post(URI, Args, [?CREATED, ?NO_CONTENT]),
-                      rabbit_web_dispatch_util:unrelativise(
-                        URI, pget("location", Headers))
-    end.
-
-defs(Key, URI, CreateMethod, Args, DeleteFun) ->
-    %% Create the item
-    URI2 = create(CreateMethod, URI, Args),
-    %% Make sure it ends up in definitions
-    Definitions = http_get("/definitions", ?OK),
-    true = lists:any(fun(I) -> test_item(Args, I) end, pget(Key, Definitions)),
-
-    %% Delete it
-    DeleteFun(URI2),
-
-    %% Post the definitions back, it should get recreated in correct form
-    http_post("/definitions", Definitions, ?CREATED),
-    assert_item(Args, http_get(URI2, ?OK)),
-
-    %% And delete it again
-    DeleteFun(URI2),
-
-    ok.
-
-definitions_test() ->
-    rabbit_runtime_parameters_test:register(),
-    rabbit_runtime_parameters_test:register_policy_validator(),
-
-    defs_v(queues, "/queues/<vhost>/my-queue", put,
-           [{name,    <<"my-queue">>},
-            {durable, true}]),
-    defs_v(exchanges, "/exchanges/<vhost>/my-exchange", put,
-           [{name, <<"my-exchange">>},
-            {type, <<"direct">>}]),
-    defs_v(bindings, "/bindings/<vhost>/e/amq.direct/e/amq.fanout", post,
-           [{routing_key, <<"routing">>}, {arguments, []}]),
-    defs_v(policies, "/policies/<vhost>/my-policy", put,
-           [{vhost,      vhost},
-            {name,       <<"my-policy">>},
-            {pattern,    <<".*">>},
-            {definition, [{testpos, [1, 2, 3]}]},
-            {priority,   1}]),
-    defs_v(parameters, "/parameters/test/<vhost>/good", put,
-           [{vhost,     vhost},
-            {component, <<"test">>},
-            {name,      <<"good">>},
-            {value,     <<"ignore">>}]),
-    defs(users, "/users/myuser", put,
-         [{name,          <<"myuser">>},
-          {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
-          {hashing_algorithm, <<"rabbit_password_hashing_sha256">>},
-          {tags,          <<"management">>}]),
-    defs(vhosts, "/vhosts/myvhost", put,
-         [{name, <<"myvhost">>}]),
-    defs(permissions, "/permissions/%2f/guest", put,
-         [{user,      <<"guest">>},
-          {vhost,     <<"/">>},
-          {configure, <<"c">>},
-          {write,     <<"w">>},
-          {read,      <<"r">>}]),
-
-    %% We just messed with guest's permissions
-    http_put("/permissions/%2f/guest",
-             [{configure, <<".*">>},
-              {write,     <<".*">>},
-              {read,      <<".*">>}], [?CREATED, ?NO_CONTENT]),
-    BrokenConfig =
-        [{users,       []},
-         {vhosts,      []},
-         {permissions, []},
-         {queues,      []},
-         {exchanges,   [[{name,        <<"amq.direct">>},
-                         {vhost,       <<"/">>},
-                         {type,        <<"definitely not direct">>},
-                         {durable,     true},
-                         {auto_delete, false},
-                         {arguments,   []}
-                        ]]},
-         {bindings,    []}],
-    http_post("/definitions", BrokenConfig, ?BAD_REQUEST),
-
-    rabbit_runtime_parameters_test:unregister_policy_validator(),
-    rabbit_runtime_parameters_test:unregister(),
-    ok.
-
-defs_vhost(Key, URI, CreateMethod, Args) ->
-    Rep1 = fun (S, S2) -> re:replace(S, "<vhost>", S2, [{return, list}]) end,
-    Rep2 = fun (L, V2) -> lists:keymap(fun (vhost) -> V2;
-                                           (V)     -> V end, 2, L) end,
-
-    %% Create test vhost
-    http_put("/vhosts/test", none, [?CREATED, ?NO_CONTENT]),
-    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
-    http_put("/permissions/test/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
-
-    %% Test against default vhost
-    defs_vhost(Key, URI, Rep1, "%2f", "test", CreateMethod,
-               Rep2(Args, <<"/">>), Rep2(Args, <<"test">>),
-               fun(URI2) -> http_delete(URI2, [?NO_CONTENT, ?CREATED]) end),
-
-    %% Test against test vhost
-    defs_vhost(Key, URI, Rep1, "test", "%2f", CreateMethod,
-               Rep2(Args, <<"test">>), Rep2(Args, <<"/">>),
-               fun(URI2) -> http_delete(URI2, [?NO_CONTENT, ?CREATED]) end),
-
-    %% Remove test vhost
-    http_delete("/vhosts/test", ?NO_CONTENT).
-
-
-defs_vhost(Key, URI0, Rep1, VHost1, VHost2, CreateMethod, Args1, Args2,
-           DeleteFun) ->
-    %% Create the item
-    URI2 = create(CreateMethod, Rep1(URI0, VHost1), Args1),
-    %% Make sure it ends up in definitions
-    Definitions = http_get("/definitions/" ++ VHost1, ?OK),
-    true = lists:any(fun(I) -> test_item(Args1, I) end, pget(Key, Definitions)),
-
-    %% Make sure it is not in the other vhost
-    Definitions0 = http_get("/definitions/" ++ VHost2, ?OK),
-    false = lists:any(fun(I) -> test_item(Args2, I) end, pget(Key, Definitions0)),
-
-    %% Post the definitions back
-    http_post("/definitions/" ++ VHost2, Definitions, [?NO_CONTENT, ?CREATED]),
-
-    %% Make sure it is now in the other vhost
-    Definitions1 = http_get("/definitions/" ++ VHost2, ?OK),
-    true = lists:any(fun(I) -> test_item(Args2, I) end, pget(Key, Definitions1)),
-
-    %% Delete it
-    DeleteFun(URI2),
-    URI3 = create(CreateMethod, Rep1(URI0, VHost2), Args2),
-    DeleteFun(URI3),
-    ok.
-
-definitions_vhost_test() ->
-    %% Ensures that definitions can be exported/imported from a single virtual
-    %% host to another
-
-    rabbit_runtime_parameters_test:register(),
-    rabbit_runtime_parameters_test:register_policy_validator(),
-
-    defs_vhost(queues, "/queues/<vhost>/my-queue", put,
-               [{name,    <<"my-queue">>},
-                {durable, true}]),
-    defs_vhost(exchanges, "/exchanges/<vhost>/my-exchange", put,
-               [{name, <<"my-exchange">>},
-                {type, <<"direct">>}]),
-    defs_vhost(bindings, "/bindings/<vhost>/e/amq.direct/e/amq.fanout", post,
-               [{routing_key, <<"routing">>}, {arguments, []}]),
-    defs_vhost(policies, "/policies/<vhost>/my-policy", put,
-               [{vhost,      vhost},
-                {name,       <<"my-policy">>},
-                {pattern,    <<".*">>},
-                {definition, [{testpos, [1, 2, 3]}]},
-                {priority,   1}]),
-
-    Config =
-        [{queues,      []},
-         {exchanges,   []},
-         {policies,    []},
-         {bindings,    []}],
-    http_post("/definitions/othervhost", Config, ?BAD_REQUEST),
-
-    rabbit_runtime_parameters_test:unregister_policy_validator(),
-    rabbit_runtime_parameters_test:unregister(),
-    ok.
-
-definitions_password_test() ->
-    % Import definitions from 3.5.x
-    Config35 = [{rabbit_version, <<"3.5.4">>}, 
-                {users, [[{name,          <<"myuser">>},
-                          {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
-                          {tags,          <<"management">>}]
-                        ]}],
-    Expected35 = [{name,          <<"myuser">>},
-                  {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
-                  {hashing_algorithm, <<"rabbit_password_hashing_md5">>},
-                  {tags,          <<"management">>}],
-    http_post("/definitions", Config35, ?CREATED),
-    Definitions35 = http_get("/definitions", ?OK),
-
-    Users35 = pget(users, Definitions35),
-
-    io:format("Defs: ~p ~n Exp: ~p~n", [Users35, Expected35]),
-
-    true = lists:any(fun(I) -> test_item(Expected35, I) end, Users35),
-
-    %% Import definitions from from 3.6.0
-    Config36 = [{rabbit_version, <<"3.6.0">>}, 
-                {users, [[{name,          <<"myuser">>},
-                          {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
-                          {tags,          <<"management">>}]
-                        ]}],
-    Expected36 = [{name,          <<"myuser">>},
-                  {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
-                  {hashing_algorithm, <<"rabbit_password_hashing_sha256">>},
-                  {tags,          <<"management">>}],
-    http_post("/definitions", Config36, ?CREATED),
-
-    Definitions36 = http_get("/definitions", ?OK),
-    Users36 = pget(users, Definitions36),
-
-    true = lists:any(fun(I) -> test_item(Expected36, I) end, Users36),
-
-    %% No hashing_algorithm provided
-    ConfigDefault = [{rabbit_version, <<"3.6.1">>}, 
-                     {users, [[{name,          <<"myuser">>},
-                               {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
-                               {tags,          <<"management">>}]
-                             ]}],
-    application:set_env(rabbit, 
-                        password_hashing_module, 
-                        rabbit_password_hashing_sha512),
-
-    ExpectedDefault = [{name,          <<"myuser">>},
-                       {password_hash, <<"WAbU0ZIcvjTpxM3Q3SbJhEAM2tQ=">>},
-                       {hashing_algorithm, <<"rabbit_password_hashing_sha512">>},
-                       {tags,          <<"management">>}],
-    http_post("/definitions", ConfigDefault, ?CREATED),
-
-    DefinitionsDefault = http_get("/definitions", ?OK),
-    UsersDefault = pget(users, DefinitionsDefault),
-
-    true = lists:any(fun(I) -> test_item(ExpectedDefault, I) end, UsersDefault),
-    ok.
-
-definitions_remove_things_test() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    amqp_channel:call(Ch, #'queue.declare'{ queue = <<"my-exclusive">>,
-                                            exclusive = true }),
-    http_get("/queues/%2f/my-exclusive", ?OK),
-    Definitions = http_get("/definitions", ?OK),
-    [] = pget(queues, Definitions),
-    [] = pget(exchanges, Definitions),
-    [] = pget(bindings, Definitions),
-    amqp_channel:close(Ch),
-    amqp_connection:close(Conn),
-    ok.
-
-definitions_server_named_queue_test() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    #'queue.declare_ok'{ queue = QName } =
-        amqp_channel:call(Ch, #'queue.declare'{}),
-    amqp_channel:close(Ch),
-    amqp_connection:close(Conn),
-    Path = "/queues/%2f/" ++ mochiweb_util:quote_plus(QName),
-    http_get(Path, ?OK),
-    Definitions = http_get("/definitions", ?OK),
-    http_delete(Path, ?NO_CONTENT),
-    http_get(Path, ?NOT_FOUND),
-    http_post("/definitions", Definitions, [?CREATED, ?NO_CONTENT]),
-    http_get(Path, ?OK),
-    http_delete(Path, ?NO_CONTENT),
-    ok.
-
-aliveness_test() ->
-    [{status, <<"ok">>}] = http_get("/aliveness-test/%2f", ?OK),
-    http_get("/aliveness-test/foo", ?NOT_FOUND),
-    http_delete("/queues/%2f/aliveness-test", ?NO_CONTENT),
-    ok.
-
-arguments_test() ->
-    XArgs = [{type, <<"headers">>},
-             {arguments, [{'alternate-exchange', <<"amq.direct">>}]}],
-    QArgs = [{arguments, [{'x-expires', 1800000}]}],
-    BArgs = [{routing_key, <<"">>},
-             {arguments, [{'x-match', <<"all">>},
-                          {foo, <<"bar">>}]}],
-    http_put("/exchanges/%2f/myexchange", XArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_post("/bindings/%2f/e/myexchange/q/myqueue", BArgs, [?CREATED, ?NO_CONTENT]),
-    Definitions = http_get("/definitions", ?OK),
-    http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
-    http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
-    http_post("/definitions", Definitions, ?CREATED),
-    [{'alternate-exchange', <<"amq.direct">>}] =
-        pget(arguments, http_get("/exchanges/%2f/myexchange", ?OK)),
-    [{'x-expires', 1800000}] =
-        pget(arguments, http_get("/queues/%2f/myqueue", ?OK)),
-    true = lists:sort([{'x-match', <<"all">>}, {foo, <<"bar">>}]) =:=
-       lists:sort(pget(arguments,
-                       http_get("/bindings/%2f/e/myexchange/q/myqueue/" ++
-                                    "~nXOkVwqZzUOdS9_HcBWheg", ?OK))),
-    http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
-    http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
-    ok.
-
-arguments_table_test() ->
-    Args = [{'upstreams', [<<"amqp://localhost/%2f/upstream1">>,
-                           <<"amqp://localhost/%2f/upstream2">>]}],
-    XArgs = [{type, <<"headers">>},
-             {arguments, Args}],
-    http_put("/exchanges/%2f/myexchange", XArgs, [?CREATED, ?NO_CONTENT]),
-    Definitions = http_get("/definitions", ?OK),
-    http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
-    http_post("/definitions", Definitions, ?CREATED),
-    Args = pget(arguments, http_get("/exchanges/%2f/myexchange", ?OK)),
-    http_delete("/exchanges/%2f/myexchange", ?NO_CONTENT),
-    ok.
-
-queue_purge_test() ->
-    QArgs = [],
-    http_put("/queues/%2f/myqueue", QArgs, [?CREATED, ?NO_CONTENT]),
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    Publish = fun() ->
-                      amqp_channel:call(
-                        Ch, #'basic.publish'{exchange = <<"">>,
-                                             routing_key = <<"myqueue">>},
-                        #amqp_msg{payload = <<"message">>})
-              end,
-    Publish(),
-    Publish(),
-    amqp_channel:call(
-      Ch, #'queue.declare'{queue = <<"exclusive">>, exclusive = true}),
-    {#'basic.get_ok'{}, _} =
-        amqp_channel:call(Ch, #'basic.get'{queue = <<"myqueue">>}),
-    http_delete("/queues/%2f/myqueue/contents", ?NO_CONTENT),
-    http_delete("/queues/%2f/badqueue/contents", ?NOT_FOUND),
-    http_delete("/queues/%2f/exclusive/contents", ?BAD_REQUEST),
-    http_delete("/queues/%2f/exclusive", ?BAD_REQUEST),
-    #'basic.get_empty'{} =
-        amqp_channel:call(Ch, #'basic.get'{queue = <<"myqueue">>}),
-    amqp_channel:close(Ch),
-    amqp_connection:close(Conn),
-    http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
-    ok.
-
-queue_actions_test() ->
-    http_put("/queues/%2f/q", [], [?CREATED, ?NO_CONTENT]),
-    http_post("/queues/%2f/q/actions", [{action, sync}], ?NO_CONTENT),
-    http_post("/queues/%2f/q/actions", [{action, cancel_sync}], ?NO_CONTENT),
-    http_post("/queues/%2f/q/actions", [{action, change_colour}], ?BAD_REQUEST),
-    http_delete("/queues/%2f/q", ?NO_CONTENT),
-    ok.
-
-exclusive_consumer_test() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    #'queue.declare_ok'{ queue = QName } =
-        amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
-    amqp_channel:subscribe(Ch, #'basic.consume'{queue     = QName,
-                                                exclusive = true}, self()),
-    timer:sleep(1000), %% Sadly we need to sleep to let the stats update
-    http_get("/queues/%2f/"), %% Just check we don't blow up
-    amqp_channel:close(Ch),
-    amqp_connection:close(Conn),
-    ok.
-
-
-exclusive_queue_test() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    #'queue.declare_ok'{ queue = QName } =
-       amqp_channel:call(Ch, #'queue.declare'{exclusive = true}),
-    timer:sleep(1000), %% Sadly we need to sleep to let the stats update
-    Path = "/queues/%2f/" ++ mochiweb_util:quote_plus(QName),
-    Queue = http_get(Path),
-    assert_item([{name,         QName},
-                {vhost,       <<"/">>},
-                {durable,     false},
-                {auto_delete, false},
-                {exclusive,   true},
-                {arguments,   []}], Queue),
-    amqp_channel:close(Ch),
-    amqp_connection:close(Conn),
-    ok.
-
-connections_channels_pagination_test() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    {ok, Conn1} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch1} = amqp_connection:open_channel(Conn1),
-    {ok, Conn2} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch2} = amqp_connection:open_channel(Conn2),
-
-    timer:sleep(1000), %% Sadly we need to sleep to let the stats update
-    PageOfTwo = http_get("/connections?page=1&page_size=2", ?OK),
-    ?assertEqual(3, proplists:get_value(total_count, PageOfTwo)),
-    ?assertEqual(3, proplists:get_value(filtered_count, PageOfTwo)),
-    ?assertEqual(2, proplists:get_value(item_count, PageOfTwo)),
-    ?assertEqual(1, proplists:get_value(page, PageOfTwo)),
-    ?assertEqual(2, proplists:get_value(page_size, PageOfTwo)),
-    ?assertEqual(2, proplists:get_value(page_count, PageOfTwo)),
-
-
-    TwoOfTwo = http_get("/channels?page=2&page_size=2", ?OK),
-    ?assertEqual(3, proplists:get_value(total_count, TwoOfTwo)),
-    ?assertEqual(3, proplists:get_value(filtered_count, TwoOfTwo)),
-    ?assertEqual(1, proplists:get_value(item_count, TwoOfTwo)),
-    ?assertEqual(2, proplists:get_value(page, TwoOfTwo)),
-    ?assertEqual(2, proplists:get_value(page_size, TwoOfTwo)),
-    ?assertEqual(2, proplists:get_value(page_count, TwoOfTwo)),
-
-    amqp_channel:close(Ch),
-    amqp_connection:close(Conn),
-    amqp_channel:close(Ch1),
-    amqp_connection:close(Conn1),
-    amqp_channel:close(Ch2),
-    amqp_connection:close(Conn2),
-    ok.
-
-exchanges_pagination_test() ->
-    QArgs = [],
-    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
-    http_put("/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
-    http_get("/exchanges/vh1?page=1&page_size=2", ?OK),
-    http_put("/exchanges/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/exchanges/vh1/test1", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/exchanges/%2f/test2_reg", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/exchanges/vh1/reg_test3", QArgs, [?CREATED, ?NO_CONTENT]),
-    PageOfTwo = http_get("/exchanges?page=1&page_size=2", ?OK),
-    ?assertEqual(19, proplists:get_value(total_count, PageOfTwo)),
-    ?assertEqual(19, proplists:get_value(filtered_count, PageOfTwo)),
-    ?assertEqual(2, proplists:get_value(item_count, PageOfTwo)),
-    ?assertEqual(1, proplists:get_value(page, PageOfTwo)),
-    ?assertEqual(2, proplists:get_value(page_size, PageOfTwo)),
-    ?assertEqual(10, proplists:get_value(page_count, PageOfTwo)),
-    assert_list([[{name, <<"">>}, {vhost, <<"/">>}],
-                [{name, <<"amq.direct">>}, {vhost, <<"/">>}]
-               ], proplists:get_value(items, PageOfTwo)),
-
-    ByName = http_get("/exchanges?page=1&page_size=2&name=reg", ?OK),
-    ?assertEqual(19, proplists:get_value(total_count, ByName)),
-    ?assertEqual(2, proplists:get_value(filtered_count, ByName)),
-    ?assertEqual(2, proplists:get_value(item_count, ByName)),
-    ?assertEqual(1, proplists:get_value(page, ByName)),
-    ?assertEqual(2, proplists:get_value(page_size, ByName)),
-    ?assertEqual(1, proplists:get_value(page_count, ByName)),
-    assert_list([[{name, <<"test2_reg">>}, {vhost, <<"/">>}],
-                [{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
-               ], proplists:get_value(items, ByName)),
-
-
-    RegExByName = http_get(
-                   "/exchanges?page=1&page_size=2&name=^(?=^reg)&use_regex=true",
-                   ?OK),
-    ?assertEqual(19, proplists:get_value(total_count, RegExByName)),
-    ?assertEqual(1, proplists:get_value(filtered_count, RegExByName)),
-    ?assertEqual(1, proplists:get_value(item_count, RegExByName)),
-    ?assertEqual(1, proplists:get_value(page, RegExByName)),
-    ?assertEqual(2, proplists:get_value(page_size, RegExByName)),
-    ?assertEqual(1, proplists:get_value(page_count, RegExByName)),
-    assert_list([[{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
-               ], proplists:get_value(items, RegExByName)),
-
-
-    http_get("/exchanges?page=1000", ?BAD_REQUEST),
-    http_get("/exchanges?page=-1", ?BAD_REQUEST),
-    http_get("/exchanges?page=not_an_integer_value", ?BAD_REQUEST),
-    http_get("/exchanges?page=1&page_size=not_an_intger_value", ?BAD_REQUEST),
-    http_get("/exchanges?page=1&page_size=501", ?BAD_REQUEST), %% max 500 allowed
-    http_get("/exchanges?page=-1&page_size=-2", ?BAD_REQUEST),
-    http_delete("/exchanges/%2f/test0", ?NO_CONTENT),
-    http_delete("/exchanges/vh1/test1", ?NO_CONTENT),
-    http_delete("/exchanges/%2f/test2_reg", ?NO_CONTENT),
-    http_delete("/exchanges/vh1/reg_test3", ?NO_CONTENT),
-    http_delete("/vhosts/vh1", ?NO_CONTENT),
-    ok.
-
-exchanges_pagination_permissions_test() ->
-    http_put("/users/admin",   [{password, <<"admin">>},
-                               {tags, <<"administrator">>}], [?CREATED, ?NO_CONTENT]),
-    Perms = [{configure, <<".*">>},
-            {write,     <<".*">>},
-            {read,      <<".*">>}],
-    http_put("/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/vh1/admin",   Perms, [?CREATED, ?NO_CONTENT]),
-    QArgs = [],
-    http_put("/exchanges/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/exchanges/vh1/test1", QArgs, "admin","admin", [?CREATED, ?NO_CONTENT]),
-    FirstPage = http_get("/exchanges?page=1&name=test1","admin","admin", ?OK),
-    ?assertEqual(8, proplists:get_value(total_count, FirstPage)),
-    ?assertEqual(1, proplists:get_value(item_count, FirstPage)),
-    ?assertEqual(1, proplists:get_value(page, FirstPage)),
-    ?assertEqual(100, proplists:get_value(page_size, FirstPage)),
-    ?assertEqual(1, proplists:get_value(page_count, FirstPage)),
-    assert_list([[{name, <<"test1">>}, {vhost, <<"vh1">>}]
-               ], proplists:get_value(items, FirstPage)),
-    http_delete("/exchanges/%2f/test0", ?NO_CONTENT),
-    http_delete("/exchanges/vh1/test1","admin","admin", ?NO_CONTENT),
-    http_delete("/users/admin", ?NO_CONTENT),
-    ok.
-
-
-
-queue_pagination_test() ->
-    QArgs = [],
-    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
-    http_put("/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
-
-    http_get("/queues/vh1?page=1&page_size=2", ?OK),
-
-    http_put("/queues/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/vh1/test1", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/test2_reg", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/vh1/reg_test3", QArgs, [?CREATED, ?NO_CONTENT]),
-    PageOfTwo = http_get("/queues?page=1&page_size=2", ?OK),
-    ?assertEqual(4, proplists:get_value(total_count, PageOfTwo)),
-    ?assertEqual(4, proplists:get_value(filtered_count, PageOfTwo)),
-    ?assertEqual(2, proplists:get_value(item_count, PageOfTwo)),
-    ?assertEqual(1, proplists:get_value(page, PageOfTwo)),
-    ?assertEqual(2, proplists:get_value(page_size, PageOfTwo)),
-    ?assertEqual(2, proplists:get_value(page_count, PageOfTwo)),
-    assert_list([[{name, <<"test0">>}, {vhost, <<"/">>}],
-                [{name, <<"test2_reg">>}, {vhost, <<"/">>}]
-               ], proplists:get_value(items, PageOfTwo)),
-
-    SortedByName = http_get("/queues?sort=name&page=1&page_size=2", ?OK),
-    ?assertEqual(4, proplists:get_value(total_count, SortedByName)),
-    ?assertEqual(4, proplists:get_value(filtered_count, SortedByName)),
-    ?assertEqual(2, proplists:get_value(item_count, SortedByName)),
-    ?assertEqual(1, proplists:get_value(page, SortedByName)),
-    ?assertEqual(2, proplists:get_value(page_size, SortedByName)),
-    ?assertEqual(2, proplists:get_value(page_count, SortedByName)),
-    assert_list([[{name, <<"reg_test3">>}, {vhost, <<"vh1">>}],
-                [{name, <<"test0">>}, {vhost, <<"/">>}]
-               ], proplists:get_value(items, SortedByName)),
-
-
-    FirstPage = http_get("/queues?page=1", ?OK),
-    ?assertEqual(4, proplists:get_value(total_count, FirstPage)),
-    ?assertEqual(4, proplists:get_value(filtered_count, FirstPage)),
-    ?assertEqual(4, proplists:get_value(item_count, FirstPage)),
-    ?assertEqual(1, proplists:get_value(page, FirstPage)),
-    ?assertEqual(100, proplists:get_value(page_size, FirstPage)),
-    ?assertEqual(1, proplists:get_value(page_count, FirstPage)),
-    assert_list([[{name, <<"test0">>}, {vhost, <<"/">>}],
-                [{name, <<"test1">>}, {vhost, <<"vh1">>}],
-                [{name, <<"test2_reg">>}, {vhost, <<"/">>}],
-                [{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
-               ], proplists:get_value(items, FirstPage)),
-
-
-    ReverseSortedByName = http_get(
-                   "/queues?page=2&page_size=2&sort=name&sort_reverse=true", 
-                   ?OK),
-    ?assertEqual(4, proplists:get_value(total_count, ReverseSortedByName)),
-    ?assertEqual(4, proplists:get_value(filtered_count, ReverseSortedByName)),
-    ?assertEqual(2, proplists:get_value(item_count, ReverseSortedByName)),
-    ?assertEqual(2, proplists:get_value(page, ReverseSortedByName)),
-    ?assertEqual(2, proplists:get_value(page_size, ReverseSortedByName)),
-    ?assertEqual(2, proplists:get_value(page_count, ReverseSortedByName)),
-    assert_list([[{name, <<"test0">>}, {vhost, <<"/">>}],
-                [{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
-               ], proplists:get_value(items, ReverseSortedByName)),
-
-                                               
-    ByName = http_get("/queues?page=1&page_size=2&name=reg", ?OK),
-    ?assertEqual(4, proplists:get_value(total_count, ByName)),
-    ?assertEqual(2, proplists:get_value(filtered_count, ByName)),
-    ?assertEqual(2, proplists:get_value(item_count, ByName)),
-    ?assertEqual(1, proplists:get_value(page, ByName)),
-    ?assertEqual(2, proplists:get_value(page_size, ByName)),
-    ?assertEqual(1, proplists:get_value(page_count, ByName)),
-    assert_list([[{name, <<"test2_reg">>}, {vhost, <<"/">>}],
-                [{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
-               ], proplists:get_value(items, ByName)),
-
-    RegExByName = http_get(
-                   "/queues?page=1&page_size=2&name=^(?=^reg)&use_regex=true",
-                   ?OK),
-    ?assertEqual(4, proplists:get_value(total_count, RegExByName)),
-    ?assertEqual(1, proplists:get_value(filtered_count, RegExByName)),
-    ?assertEqual(1, proplists:get_value(item_count, RegExByName)),
-    ?assertEqual(1, proplists:get_value(page, RegExByName)),
-    ?assertEqual(2, proplists:get_value(page_size, RegExByName)),
-    ?assertEqual(1, proplists:get_value(page_count, RegExByName)),
-    assert_list([[{name, <<"reg_test3">>}, {vhost, <<"vh1">>}]
-               ], proplists:get_value(items, RegExByName)),
-
-
-    http_get("/queues?page=1000", ?BAD_REQUEST),
-    http_get("/queues?page=-1", ?BAD_REQUEST),
-    http_get("/queues?page=not_an_integer_value", ?BAD_REQUEST),
-    http_get("/queues?page=1&page_size=not_an_intger_value", ?BAD_REQUEST),
-    http_get("/queues?page=1&page_size=501", ?BAD_REQUEST), %% max 500 allowed
-    http_get("/queues?page=-1&page_size=-2", ?BAD_REQUEST),
-    http_delete("/queues/%2f/test0", ?NO_CONTENT),
-    http_delete("/queues/vh1/test1", ?NO_CONTENT),
-    http_delete("/queues/%2f/test2_reg", ?NO_CONTENT),
-    http_delete("/queues/vh1/reg_test3", ?NO_CONTENT),
-    http_delete("/vhosts/vh1", ?NO_CONTENT),
-    ok.
-
-queues_pagination_permissions_test() ->
-    http_put("/users/admin",   [{password, <<"admin">>},
-                               {tags, <<"administrator">>}], [?CREATED, ?NO_CONTENT]),
-    Perms = [{configure, <<".*">>},
-            {write,     <<".*">>},
-            {read,      <<".*">>}],
-    http_put("/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/vh1/admin",   Perms, [?CREATED, ?NO_CONTENT]),
-    QArgs = [],
-    http_put("/queues/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/vh1/test1", QArgs, "admin","admin", [?CREATED, ?NO_CONTENT]),
-    FirstPage = http_get("/queues?page=1","admin","admin", ?OK),
-    ?assertEqual(1, proplists:get_value(total_count, FirstPage)),
-    ?assertEqual(1, proplists:get_value(item_count, FirstPage)),
-    ?assertEqual(1, proplists:get_value(page, FirstPage)),
-    ?assertEqual(100, proplists:get_value(page_size, FirstPage)),
-    ?assertEqual(1, proplists:get_value(page_count, FirstPage)),
-    assert_list([[{name, <<"test1">>}, {vhost, <<"vh1">>}]
-               ], proplists:get_value(items, FirstPage)),
-    http_delete("/queues/%2f/test0", ?NO_CONTENT),
-    http_delete("/queues/vh1/test1","admin","admin", ?NO_CONTENT),
-    http_delete("/users/admin", ?NO_CONTENT),
-    ok.
-
-samples_range_test() ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-
-    %% Channels.
-
-    [ConnInfo] = http_get("/channels?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    {_, ConnDetails} = lists:keyfind(connection_details, 1, ConnInfo),
-    {_, ConnName0} = lists:keyfind(name, 1, ConnDetails),
-    ConnName = http_uri:encode(binary_to_list(ConnName0)),
-    ChanName = ConnName ++ http_uri:encode(" (1)"),
-
-    http_get("/channels/" ++ ChanName ++ "?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/channels/" ++ ChanName ++ "?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    http_get("/vhosts/%2f/channels?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/vhosts/%2f/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    %% Connections.
-
-    http_get("/connections?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/connections?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    http_get("/connections/" ++ ConnName ++ "?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/connections/" ++ ConnName ++ "?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    http_get("/connections/" ++ ConnName ++ "/channels?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/connections/" ++ ConnName ++ "/channels?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    http_get("/vhosts/%2f/connections?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/vhosts/%2f/connections?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    amqp_channel:close(Ch),
-    amqp_connection:close(Conn),
-
-    %% Exchanges.
-
-    http_get("/exchanges?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/exchanges?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    http_get("/exchanges/%2f/amq.direct?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/exchanges/%2f/amq.direct?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    %% Nodes.
-
-    http_get("/nodes?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/nodes?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    %% Overview.
-
-    http_get("/overview?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/overview?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    %% Queues.
-
-    http_put("/queues/%2f/test0", [], [?CREATED, ?NO_CONTENT]),
-
-    http_get("/queues/%2f?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/queues/%2f?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-    http_get("/queues/%2f/test0?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/queues/%2f/test0?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    http_delete("/queues/%2f/test0", ?NO_CONTENT),
-
-    %% Vhosts.
-
-    http_put("/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
-
-    http_get("/vhosts?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/vhosts?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-    http_get("/vhosts/vh1?lengths_age=60&lengths_incr=1", ?OK),
-    http_get("/vhosts/vh1?lengths_age=6000&lengths_incr=1", ?BAD_REQUEST),
-
-    http_delete("/vhosts/vh1", ?NO_CONTENT),
-
-    ok.
-
-sorting_test() ->
-    QArgs = [],
-    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
-    http_put("/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/vh1/test1", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/test2", QArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/vh1/test3", QArgs, [?CREATED, ?NO_CONTENT]),
-    assert_list([[{name, <<"test0">>}],
-                 [{name, <<"test2">>}],
-                 [{name, <<"test1">>}],
-                 [{name, <<"test3">>}]], http_get("/queues", ?OK)),
-    assert_list([[{name, <<"test0">>}],
-                 [{name, <<"test1">>}],
-                 [{name, <<"test2">>}],
-                 [{name, <<"test3">>}]], http_get("/queues?sort=name", ?OK)),
-    assert_list([[{name, <<"test0">>}],
-                 [{name, <<"test2">>}],
-                 [{name, <<"test1">>}],
-                 [{name, <<"test3">>}]], http_get("/queues?sort=vhost", ?OK)),
-    assert_list([[{name, <<"test3">>}],
-                 [{name, <<"test1">>}],
-                 [{name, <<"test2">>}],
-                 [{name, <<"test0">>}]], http_get("/queues?sort_reverse=true", ?OK)),
-    assert_list([[{name, <<"test3">>}],
-                 [{name, <<"test2">>}],
-                 [{name, <<"test1">>}],
-                 [{name, <<"test0">>}]], http_get("/queues?sort=name&sort_reverse=true", ?OK)),
-    assert_list([[{name, <<"test3">>}],
-                 [{name, <<"test1">>}],
-                 [{name, <<"test2">>}],
-                 [{name, <<"test0">>}]], http_get("/queues?sort=vhost&sort_reverse=true", ?OK)),
-    %% Rather poor but at least test it doesn't blow up with dots
-    http_get("/queues?sort=owner_pid_details.name", ?OK),
-    http_delete("/queues/%2f/test0", ?NO_CONTENT),
-    http_delete("/queues/vh1/test1", ?NO_CONTENT),
-    http_delete("/queues/%2f/test2", ?NO_CONTENT),
-    http_delete("/queues/vh1/test3", ?NO_CONTENT),
-    http_delete("/vhosts/vh1", ?NO_CONTENT),
-    ok.
-
-format_output_test() ->
-    QArgs = [],
-    PermArgs = [{configure, <<".*">>}, {write, <<".*">>}, {read, <<".*">>}],
-    http_put("/vhosts/vh1", none, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/vh1/guest", PermArgs, [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/test0", QArgs, [?CREATED, ?NO_CONTENT]),
-    assert_list([[{name, <<"test0">>},
-                 {consumer_utilisation, null},
-                 {exclusive_consumer_tag, null},
-                 {recoverable_slaves, null}]], http_get("/queues", ?OK)),
-    http_delete("/queues/%2f/test0", ?NO_CONTENT),
-    http_delete("/vhosts/vh1", ?NO_CONTENT),
-    ok.
-
-columns_test() ->
-    http_put("/queues/%2f/test", [{arguments, [{<<"foo">>, <<"bar">>}]}],
-             [?CREATED, ?NO_CONTENT]),
-    [[{name, <<"test">>}, {arguments, [{foo, <<"bar">>}]}]] =
-        http_get("/queues?columns=arguments.foo,name", ?OK),
-    [{name, <<"test">>}, {arguments, [{foo, <<"bar">>}]}] =
-        http_get("/queues/%2f/test?columns=arguments.foo,name", ?OK),
-    http_delete("/queues/%2f/test", ?NO_CONTENT),
-    ok.
-
-get_test() ->
-    %% Real world example...
-    Headers = [{<<"x-forwarding">>, array,
-                [{table,
-                  [{<<"uri">>, longstr,
-                    <<"amqp://localhost/%2f/upstream">>}]}]}],
-    http_put("/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    Publish = fun (Payload) ->
-                      amqp_channel:cast(
-                        Ch, #'basic.publish'{exchange = <<>>,
-                                             routing_key = <<"myqueue">>},
-                        #amqp_msg{props = #'P_basic'{headers = Headers},
-                                  payload = Payload})
-              end,
-    Publish(<<"1aaa">>),
-    Publish(<<"2aaa">>),
-    Publish(<<"3aaa">>),
-    amqp_connection:close(Conn),
-    [Msg] = http_post("/queues/%2f/myqueue/get", [{requeue,  false},
-                                                  {count,    1},
-                                                  {encoding, auto},
-                                                  {truncate, 1}], ?OK),
-    false         = pget(redelivered, Msg),
-    <<>>          = pget(exchange,    Msg),
-    <<"myqueue">> = pget(routing_key, Msg),
-    <<"1">>       = pget(payload,     Msg),
-    [{'x-forwarding',
-      [[{uri,<<"amqp://localhost/%2f/upstream">>}]]}] =
-        pget(headers, pget(properties, Msg)),
-
-    [M2, M3] = http_post("/queues/%2f/myqueue/get", [{requeue,  true},
-                                                     {count,    5},
-                                                     {encoding, auto}], ?OK),
-    <<"2aaa">> = pget(payload, M2),
-    <<"3aaa">> = pget(payload, M3),
-    2 = length(http_post("/queues/%2f/myqueue/get", [{requeue,  false},
-                                                     {count,    5},
-                                                     {encoding, auto}], ?OK)),
-    [] = http_post("/queues/%2f/myqueue/get", [{requeue,  false},
-                                               {count,    5},
-                                               {encoding, auto}], ?OK),
-    http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
-    ok.
-
-get_fail_test() ->
-    http_put("/users/myuser", [{password, <<"password">>},
-                               {tags, <<"management">>}], ?NO_CONTENT),
-    http_put("/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
-    http_post("/queues/%2f/myqueue/get",
-              [{requeue,  false},
-               {count,    1},
-               {encoding, auto}], "myuser", "password", ?NOT_AUTHORISED),
-    http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
-    http_delete("/users/myuser", ?NO_CONTENT),
-    ok.
-
-publish_test() ->
-    Headers = [{'x-forwarding', [[{uri,<<"amqp://localhost/%2f/upstream">>}]]}],
-    Msg = msg(<<"myqueue">>, Headers, <<"Hello world">>),
-    http_put("/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
-    ?assertEqual([{routed, true}],
-                 http_post("/exchanges/%2f/amq.default/publish", Msg, ?OK)),
-    [Msg2] = http_post("/queues/%2f/myqueue/get", [{requeue,  false},
-                                                   {count,    1},
-                                                   {encoding, auto}], ?OK),
-    assert_item(Msg, Msg2),
-    http_post("/exchanges/%2f/amq.default/publish", Msg2, ?OK),
-    [Msg3] = http_post("/queues/%2f/myqueue/get", [{requeue,  false},
-                                                   {count,    1},
-                                                   {encoding, auto}], ?OK),
-    assert_item(Msg, Msg3),
-    http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
-    ok.
-
-publish_accept_json_test() ->
-    Headers = [{'x-forwarding', [[{uri, <<"amqp://localhost/%2f/upstream">>}]]}],
-    Msg = msg(<<"myqueue">>, Headers, <<"Hello world">>),
-    http_put("/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
-    ?assertEqual([{routed, true}],
-                http_post_accept_json("/exchanges/%2f/amq.default/publish", 
-                                      Msg, ?OK)),
-
-    [Msg2] = http_post_accept_json("/queues/%2f/myqueue/get", 
-                                  [{requeue, false},
-                                   {count, 1},
-                                   {encoding, auto}], ?OK),
-    assert_item(Msg, Msg2),
-    http_post_accept_json("/exchanges/%2f/amq.default/publish", Msg2, ?OK),
-    [Msg3] = http_post_accept_json("/queues/%2f/myqueue/get", 
-                                  [{requeue, false},
-                                   {count, 1},
-                                   {encoding, auto}], ?OK),
-    assert_item(Msg, Msg3),
-    http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
-    ok.
-
-publish_fail_test() ->
-    Msg = msg(<<"myqueue">>, [], <<"Hello world">>),
-    http_put("/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
-    http_put("/users/myuser", [{password, <<"password">>},
-                               {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
-    http_post("/exchanges/%2f/amq.default/publish", Msg, "myuser", "password",
-              ?NOT_AUTHORISED),
-    Msg2 = [{exchange,         <<"">>},
-            {routing_key,      <<"myqueue">>},
-            {properties,       [{user_id, <<"foo">>}]},
-            {payload,          <<"Hello world">>},
-            {payload_encoding, <<"string">>}],
-    http_post("/exchanges/%2f/amq.default/publish", Msg2, ?BAD_REQUEST),
-    Msg3 = [{exchange,         <<"">>},
-            {routing_key,      <<"myqueue">>},
-            {properties,       []},
-            {payload,          [<<"not a string">>]},
-            {payload_encoding, <<"string">>}],
-    http_post("/exchanges/%2f/amq.default/publish", Msg3, ?BAD_REQUEST),
-    MsgTemplate = [{exchange,         <<"">>},
-                   {routing_key,      <<"myqueue">>},
-                   {payload,          <<"Hello world">>},
-                   {payload_encoding, <<"string">>}],
-    [http_post("/exchanges/%2f/amq.default/publish",
-               [{properties, [BadProp]} | MsgTemplate], ?BAD_REQUEST)
-     || BadProp <- [{priority,   <<"really high">>},
-                    {timestamp,  <<"recently">>},
-                    {expiration, 1234}]],
-    http_delete("/users/myuser", ?NO_CONTENT),
-    ok.
-
-publish_base64_test() ->
-    Msg     = msg(<<"myqueue">>, [], <<"YWJjZA==">>, <<"base64">>),
-    BadMsg1 = msg(<<"myqueue">>, [], <<"flibble">>,  <<"base64">>),
-    BadMsg2 = msg(<<"myqueue">>, [], <<"YWJjZA==">>, <<"base99">>),
-    http_put("/queues/%2f/myqueue", [], [?CREATED, ?NO_CONTENT]),
-    http_post("/exchanges/%2f/amq.default/publish", Msg, ?OK),
-    http_post("/exchanges/%2f/amq.default/publish", BadMsg1, ?BAD_REQUEST),
-    http_post("/exchanges/%2f/amq.default/publish", BadMsg2, ?BAD_REQUEST),
-    [Msg2] = http_post("/queues/%2f/myqueue/get", [{requeue,  false},
-                                                   {count,    1},
-                                                   {encoding, auto}], ?OK),
-    ?assertEqual(<<"abcd">>, pget(payload, Msg2)),
-    http_delete("/queues/%2f/myqueue", ?NO_CONTENT),
-    ok.
-
-publish_unrouted_test() ->
-    Msg = msg(<<"hmmm">>, [], <<"Hello world">>),
-    ?assertEqual([{routed, false}],
-                 http_post("/exchanges/%2f/amq.default/publish", Msg, ?OK)).
-
-if_empty_unused_test() ->
-    http_put("/exchanges/%2f/test", [], [?CREATED, ?NO_CONTENT]),
-    http_put("/queues/%2f/test", [], [?CREATED, ?NO_CONTENT]),
-    http_post("/bindings/%2f/e/test/q/test", [], [?CREATED, ?NO_CONTENT]),
-    http_post("/exchanges/%2f/amq.default/publish",
-              msg(<<"test">>, [], <<"Hello world">>), ?OK),
-    http_delete("/queues/%2f/test?if-empty=true", ?BAD_REQUEST),
-    http_delete("/exchanges/%2f/test?if-unused=true", ?BAD_REQUEST),
-    http_delete("/queues/%2f/test/contents", ?NO_CONTENT),
-
-    {Conn, _ConnPath, _ChPath, _ConnChPath} = get_conn("guest", "guest"),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    amqp_channel:subscribe(Ch, #'basic.consume'{queue = <<"test">> }, self()),
-    http_delete("/queues/%2f/test?if-unused=true", ?BAD_REQUEST),
-    amqp_connection:close(Conn),
-
-    http_delete("/queues/%2f/test?if-empty=true", ?NO_CONTENT),
-    http_delete("/exchanges/%2f/test?if-unused=true", ?NO_CONTENT),
-    passed.
-
-parameters_test() ->
-    rabbit_runtime_parameters_test:register(),
-
-    http_put("/parameters/test/%2f/good", [{value, <<"ignore">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/parameters/test/%2f/maybe", [{value, <<"good">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/parameters/test/%2f/maybe", [{value, <<"bad">>}], ?BAD_REQUEST),
-    http_put("/parameters/test/%2f/bad", [{value, <<"good">>}], ?BAD_REQUEST),
-    http_put("/parameters/test/um/good", [{value, <<"ignore">>}], ?NOT_FOUND),
-
-    Good = [{vhost,     <<"/">>},
-            {component, <<"test">>},
-            {name,      <<"good">>},
-            {value,     <<"ignore">>}],
-    Maybe = [{vhost,     <<"/">>},
-             {component, <<"test">>},
-             {name,      <<"maybe">>},
-             {value,     <<"good">>}],
-    List = [Good, Maybe],
-
-    assert_list(List, http_get("/parameters")),
-    assert_list(List, http_get("/parameters/test")),
-    assert_list(List, http_get("/parameters/test/%2f")),
-    assert_list([],   http_get("/parameters/oops")),
-    http_get("/parameters/test/oops", ?NOT_FOUND),
-
-    assert_item(Good,  http_get("/parameters/test/%2f/good", ?OK)),
-    assert_item(Maybe, http_get("/parameters/test/%2f/maybe", ?OK)),
-
-    http_delete("/parameters/test/%2f/good", ?NO_CONTENT),
-    http_delete("/parameters/test/%2f/maybe", ?NO_CONTENT),
-    http_delete("/parameters/test/%2f/bad", ?NOT_FOUND),
-
-    0 = length(http_get("/parameters")),
-    0 = length(http_get("/parameters/test")),
-    0 = length(http_get("/parameters/test/%2f")),
-    rabbit_runtime_parameters_test:unregister(),
-    ok.
-
-policy_test() ->
-    rabbit_runtime_parameters_test:register_policy_validator(),
-    PolicyPos  = [{vhost,      <<"/">>},
-                  {name,       <<"policy_pos">>},
-                  {pattern,    <<".*">>},
-                  {definition, [{testpos,[1,2,3]}]},
-                  {priority,   10}],
-    PolicyEven = [{vhost,      <<"/">>},
-                  {name,       <<"policy_even">>},
-                  {pattern,    <<".*">>},
-                  {definition, [{testeven,[1,2,3,4]}]},
-                  {priority,   10}],
-    http_put(
-      "/policies/%2f/policy_pos",
-      lists:keydelete(key, 1, PolicyPos),
-      [?CREATED, ?NO_CONTENT]),
-    http_put(
-      "/policies/%2f/policy_even",
-      lists:keydelete(key, 1, PolicyEven),
-      [?CREATED, ?NO_CONTENT]),
-    assert_item(PolicyPos,  http_get("/policies/%2f/policy_pos",  ?OK)),
-    assert_item(PolicyEven, http_get("/policies/%2f/policy_even", ?OK)),
-    List = [PolicyPos, PolicyEven],
-    assert_list(List, http_get("/policies",     ?OK)),
-    assert_list(List, http_get("/policies/%2f", ?OK)),
-
-    http_delete("/policies/%2f/policy_pos", ?NO_CONTENT),
-    http_delete("/policies/%2f/policy_even", ?NO_CONTENT),
-    0 = length(http_get("/policies")),
-    0 = length(http_get("/policies/%2f")),
-    rabbit_runtime_parameters_test:unregister_policy_validator(),
-    ok.
-
-policy_permissions_test() ->
-    rabbit_runtime_parameters_test:register(),
-
-    http_put("/users/admin",  [{password, <<"admin">>},
-                               {tags, <<"administrator">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/users/mon",    [{password, <<"monitor">>},
-                               {tags, <<"monitoring">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/users/policy", [{password, <<"policy">>},
-                               {tags, <<"policymaker">>}], [?CREATED, ?NO_CONTENT]),
-    http_put("/users/mgmt",   [{password, <<"mgmt">>},
-                               {tags, <<"management">>}], [?CREATED, ?NO_CONTENT]),
-    Perms = [{configure, <<".*">>},
-             {write,     <<".*">>},
-             {read,      <<".*">>}],
-    http_put("/vhosts/v", none, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/v/admin",  Perms, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/v/mon",    Perms, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/v/policy", Perms, [?CREATED, ?NO_CONTENT]),
-    http_put("/permissions/v/mgmt",   Perms, [?CREATED, ?NO_CONTENT]),
-
-    Policy = [{pattern,    <<".*">>},
-              {definition, [{<<"ha-mode">>, <<"all">>}]}],
-    Param = [{value, <<"">>}],
-
-    http_put("/policies/%2f/HA", Policy, [?CREATED, ?NO_CONTENT]),
-    http_put("/parameters/test/%2f/good", Param, [?CREATED, ?NO_CONTENT]),
-
-    Pos = fun (U) ->
-                  Expected = case U of "admin" -> [?CREATED, ?NO_CONTENT]; _ -> ?NO_CONTENT end,
-                  http_put("/policies/v/HA",        Policy, U, U, Expected),
-                  http_put(
-                    "/parameters/test/v/good",       Param, U, U, ?NO_CONTENT),
-                  1 = length(http_get("/policies",          U, U, ?OK)),
-                  1 = length(http_get("/parameters/test",   U, U, ?OK)),
-                  1 = length(http_get("/parameters",        U, U, ?OK)),
-                  1 = length(http_get("/policies/v",        U, U, ?OK)),
-                  1 = length(http_get("/parameters/test/v", U, U, ?OK)),
-                  http_get("/policies/v/HA",                U, U, ?OK),
-                  http_get("/parameters/test/v/good",       U, U, ?OK)
-          end,
-    Neg = fun (U) ->
-                  http_put("/policies/v/HA",    Policy, U, U, ?NOT_AUTHORISED),
-                  http_put(
-                    "/parameters/test/v/good",   Param, U, U, ?NOT_AUTHORISED),
-                  http_put(
-                    "/parameters/test/v/admin",  Param, U, U, ?NOT_AUTHORISED),
-                  http_get("/policies",                 U, U, ?NOT_AUTHORISED),
-                  http_get("/policies/v",               U, U, ?NOT_AUTHORISED),
-                  http_get("/parameters",               U, U, ?NOT_AUTHORISED),
-                  http_get("/parameters/test",          U, U, ?NOT_AUTHORISED),
-                  http_get("/parameters/test/v",        U, U, ?NOT_AUTHORISED),
-                  http_get("/policies/v/HA",            U, U, ?NOT_AUTHORISED),
-                  http_get("/parameters/test/v/good",   U, U, ?NOT_AUTHORISED)
-          end,
-    AlwaysNeg =
-        fun (U) ->
-                http_put("/policies/%2f/HA",  Policy, U, U, ?NOT_AUTHORISED),
-                http_put(
-                  "/parameters/test/%2f/good", Param, U, U, ?NOT_AUTHORISED),
-                http_get("/policies/%2f/HA",          U, U, ?NOT_AUTHORISED),
-                http_get("/parameters/test/%2f/good", U, U, ?NOT_AUTHORISED)
-        end,
-
-    [Neg(U) || U <- ["mon", "mgmt"]],
-    [Pos(U) || U <- ["admin", "policy"]],
-    [AlwaysNeg(U) || U <- ["mon", "mgmt", "admin", "policy"]],
-
-    %% This one is deliberately different between admin and policymaker.
-    http_put("/parameters/test/v/admin", Param, "admin", "admin", [?CREATED, ?NO_CONTENT]),
-    http_put("/parameters/test/v/admin", Param, "policy", "policy",
-             ?BAD_REQUEST),
-
-    http_delete("/vhosts/v", ?NO_CONTENT),
-    http_delete("/users/admin", ?NO_CONTENT),
-    http_delete("/users/mon", ?NO_CONTENT),
-    http_delete("/users/policy", ?NO_CONTENT),
-    http_delete("/users/mgmt", ?NO_CONTENT),
-    http_delete("/policies/%2f/HA", ?NO_CONTENT),
-
-    rabbit_runtime_parameters_test:unregister(),
-    ok.
-
-issue67_test()->
-    {ok, {{_, 401, _}, Headers, _}} = req(get, "/queues",
-                        [auth_header("user_no_access", "password_no_access")]),
-    ?assertEqual("application/json",
-      proplists:get_value("content-type",Headers)),
-    ok.
-
-extensions_test() ->
-    [[{javascript,<<"dispatcher.js">>}]] = http_get("/extensions", ?OK),
-    ok.
-
-%%---------------------------------------------------------------------------
-
-msg(Key, Headers, Body) ->
-    msg(Key, Headers, Body, <<"string">>).
-
-msg(Key, Headers, Body, Enc) ->
-    [{exchange,         <<"">>},
-     {routing_key,      Key},
-     {properties,       [{delivery_mode, 2},
-                         {headers,       Headers}]},
-     {payload,          Body},
-     {payload_encoding, Enc}].
-
-local_port(Conn) ->
-    [{sock, Sock}] = amqp_connection:info(Conn, [sock]),
-    {ok, Port} = inet:port(Sock),
-    Port.
-
-%%---------------------------------------------------------------------------
-http_get(Path) ->
-    http_get(Path, ?OK).
-
-http_get(Path, CodeExp) ->
-    http_get(Path, "guest", "guest", CodeExp).
-
-http_get(Path, User, Pass, CodeExp) ->
-    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
-        req(get, Path, [auth_header(User, Pass)]),
-    assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
-    decode(CodeExp, Headers, ResBody).
-
-http_put(Path, List, CodeExp) ->
-    http_put_raw(Path, format_for_upload(List), CodeExp).
-
-http_put(Path, List, User, Pass, CodeExp) ->
-    http_put_raw(Path, format_for_upload(List), User, Pass, CodeExp).
-
-http_post(Path, List, CodeExp) ->
-    http_post_raw(Path, format_for_upload(List), CodeExp).
-
-http_post(Path, List, User, Pass, CodeExp) ->
-    http_post_raw(Path, format_for_upload(List), User, Pass, CodeExp).
-
-http_post_accept_json(Path, List, CodeExp) ->
-    http_post_accept_json(Path, List, "guest", "guest", CodeExp).
-
-http_post_accept_json(Path, List, User, Pass, CodeExp) ->
-    http_post_raw(Path, format_for_upload(List), User, Pass, CodeExp, 
-                 [{"Accept", "application/json"}]).
-
-format_for_upload(none) ->
-    <<"">>;
-format_for_upload(List) ->
-    iolist_to_binary(mochijson2:encode({struct, List})).
-
-http_put_raw(Path, Body, CodeExp) ->
-    http_upload_raw(put, Path, Body, "guest", "guest", CodeExp, []).
-
-http_put_raw(Path, Body, User, Pass, CodeExp) ->
-    http_upload_raw(put, Path, Body, User, Pass, CodeExp, []).
-
-
-http_post_raw(Path, Body, CodeExp) ->
-    http_upload_raw(post, Path, Body, "guest", "guest", CodeExp, []).
-
-http_post_raw(Path, Body, User, Pass, CodeExp) ->
-    http_upload_raw(post, Path, Body, User, Pass, CodeExp, []).
-
-http_post_raw(Path, Body, User, Pass, CodeExp, MoreHeaders) ->
-    http_upload_raw(post, Path, Body, User, Pass, CodeExp, MoreHeaders).
-
-
-http_upload_raw(Type, Path, Body, User, Pass, CodeExp, MoreHeaders) ->
-    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
-       req(Type, Path, [auth_header(User, Pass)] ++ MoreHeaders, Body),
-    assert_code(CodeExp, CodeAct, Type, Path, ResBody),
-    decode(CodeExp, Headers, ResBody).
-
-http_delete(Path, CodeExp) ->
-    http_delete(Path, "guest", "guest", CodeExp).
-
-http_delete(Path, User, Pass, CodeExp) ->
-    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
-        req(delete, Path, [auth_header(User, Pass)]),
-    assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody),
-    decode(CodeExp, Headers, ResBody).
-
-assert_code(CodesExpected, CodeAct, Type, Path, Body) when is_list(CodesExpected) ->
-    case lists:member(CodeAct, CodesExpected) of
-        true ->
-            ok;
-        false ->
-            throw({expected, CodesExpected, got, CodeAct, type, Type,
-                   path, Path, body, Body})
-    end;
-assert_code(CodeExp, CodeAct, Type, Path, Body) ->
-    case CodeExp of
-        CodeAct -> ok;
-        _       -> throw({expected, CodeExp, got, CodeAct, type, Type,
-                          path, Path, body, Body})
-    end.
-
-req(Type, Path, Headers) ->
-    httpc:request(Type, {?PREFIX ++ Path, Headers}, ?HTTPC_OPTS, []).
-
-req(Type, Path, Headers, Body) ->
-    httpc:request(Type, {?PREFIX ++ Path, Headers, "application/json", Body},
-                  ?HTTPC_OPTS, []).
-
-decode(?OK, _Headers,  ResBody) -> cleanup(mochijson2:decode(ResBody));
-decode(_,    Headers, _ResBody) -> Headers.
-
-cleanup(L) when is_list(L) ->
-    [cleanup(I) || I <- L];
-cleanup({struct, I}) ->
-    cleanup(I);
-cleanup({K, V}) when is_binary(K) ->
-    {list_to_atom(binary_to_list(K)), cleanup(V)};
-cleanup(I) ->
-    I.
-
-auth_header(Username, Password) ->
-    {"Authorization",
-     "Basic " ++ binary_to_list(base64:encode(Username ++ ":" ++ Password))}.
-
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_util.erl b/rabbitmq-server/deps/rabbitmq_management/test/src/rabbit_mgmt_test_util.erl
deleted file mode 100644 (file)
index 1e53d89..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
-%%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
-%%
-%%   The Original Code is RabbitMQ Management Console.
-%%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2012 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(rabbit_mgmt_test_util).
-
--export([assert_list/2, assert_item/2, test_item/2]).
-
-assert_list(Exp, Act) ->
-    case length(Exp) == length(Act) of
-        true  -> ok;
-        false -> throw({expected, Exp, actual, Act})
-    end,
-    [case length(lists:filter(fun(ActI) -> test_item(ExpI, ActI) end, Act)) of
-         1 -> ok;
-         N -> throw({found, N, ExpI, in, Act})
-     end || ExpI <- Exp].
-
-assert_item(Exp, Act) ->
-    case test_item0(Exp, Act) of
-        [] -> ok;
-        Or -> throw(Or)
-    end.
-
-test_item(Exp, Act) ->
-    case test_item0(Exp, Act) of
-        [] -> true;
-        _  -> false
-    end.
-
-test_item0(Exp, Act) ->
-    [{did_not_find, ExpI, in, Act} || ExpI <- Exp,
-                                      not lists:member(ExpI, Act)].
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/src/rabbitmqadmin-test-wrapper.sh b/rabbitmq-server/deps/rabbitmq_management/test/src/rabbitmqadmin-test-wrapper.sh
deleted file mode 100755 (executable)
index d684ec9..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh -e
-TWO=$(python2 -c 'import sys;print(sys.version_info[0])')
-THREE=$(python3 -c 'import sys;print(sys.version_info[0])')
-
-if [ $TWO != 2 ] ; then
-    echo Python 2 not found!
-    exit 1
-fi
-
-if [ $THREE != 3 ] ; then
-    echo Python 3 not found!
-    exit 1
-fi
-
-echo
-echo ----------------------
-echo Testing under Python 2
-echo ----------------------
-
-python2 $(dirname $0)/rabbitmqadmin-test.py
-
-echo
-echo ----------------------
-echo Testing under Python 3
-echo ----------------------
-
-python3 $(dirname $0)/rabbitmqadmin-test.py
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/src/rabbitmqadmin-test.py b/rabbitmq-server/deps/rabbitmq_management/test/src/rabbitmqadmin-test.py
deleted file mode 100755 (executable)
index c48144e..0000000
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/env python
-
-import unittest
-import os
-import os.path
-import socket
-import subprocess
-import sys
-import shutil
-
-# TODO test: SSL, depth, config file, encodings(?), completion(???)
-
-class TestRabbitMQAdmin(unittest.TestCase):
-    def test_no_args(self):
-        self.run_fail([])
-
-    def test_help(self):
-        self.run_success(['--help'])
-        self.run_success(['help', 'subcommands'])
-        self.run_success(['help', 'config'])
-        self.run_fail(['help', 'astronomy'])
-
-    def test_host(self):
-        self.run_success(['show', 'overview'])
-        self.run_success(['--host', 'localhost', 'show', 'overview'])
-        self.run_fail(['--host', 'some-host-that-does-not-exist', 'show', 'overview'])
-
-    def test_port(self):
-        # Test port selection
-        self.run_success(['--port', '15672', 'show', 'overview'])
-        # Test port not open
-        self.run_fail(['--port', '15673', 'show', 'overview'])
-        # Test port open but not talking HTTP
-        self.run_fail(['--port', '5672', 'show', 'overview'])
-
-    def test_config(self):
-        original_home = os.getenv('HOME')
-        tmpdir = os.getenv("TMPDIR") or os.getenv("TEMP") or "/tmp"
-        shutil.copyfile(os.path.dirname(__file__) + os.sep + "default-config",
-                        tmpdir + os.sep + ".rabbitmqadmin.conf")
-        os.environ['HOME'] = tmpdir
-
-        self.run_fail(['--config', '/tmp/no-such-config-file', 'show', 'overview'])
-
-        cf = os.path.dirname(__file__) + os.sep + "test-config"
-        self.run_success(['--config', cf, '--node', 'host_normal', 'show', 'overview'])
-
-        # test 'default node in the config file' where "default" uses an invalid host
-        self.run_fail(['--config', cf, 'show', 'overview'])
-        self.run_success(["show", "overview"])
-        self.run_fail(['--node', 'non_default', "show", "overview"])
-        os.environ['HOME'] = original_home
-
-    def test_user(self):
-        self.run_success(['--user', 'guest', '--password', 'guest', 'show', 'overview'])
-        self.run_fail(['--user', 'no', '--password', 'guest', 'show', 'overview'])
-        self.run_fail(['--user', 'guest', '--password', 'no', 'show', 'overview'])
-
-    def test_fmt_long(self):
-        self.assert_output("""
---------------------------------------------------------------------------------
-
-   name: /
-tracing: False
-
---------------------------------------------------------------------------------
-
-""", ['--format', 'long', 'list', 'vhosts', 'name', 'tracing'])
-
-    def test_fmt_kvp(self):
-        self.assert_output("""name="/" tracing="False"
-""", ['--format', 'kvp', 'list', 'vhosts', 'name', 'tracing'])
-
-    def test_fmt_tsv(self):
-        self.assert_output("""name     tracing
-/      False
-""", ['--format', 'tsv', 'list', 'vhosts', 'name', 'tracing'])
-
-    def test_fmt_table(self):
-        out = """+------+---------+
-| name | tracing |
-+------+---------+
-| /    | False   |
-+------+---------+
-"""
-        self.assert_output(out, ['list', 'vhosts', 'name', 'tracing'])
-        self.assert_output(out, ['--format', 'table', 'list', 'vhosts', 'name', 'tracing'])
-
-    def test_fmt_bash(self):
-        self.assert_output("""/
-""", ['--format', 'bash', 'list', 'vhosts', 'name', 'tracing'])
-
-    def test_vhosts(self):
-        self.assert_list(['/'], l('vhosts'))
-        self.run_success(['declare', 'vhost', 'name=foo'])
-        self.assert_list(['/', 'foo'], l('vhosts'))
-        self.run_success(['delete', 'vhost', 'name=foo'])
-        self.assert_list(['/'], l('vhosts'))
-
-    def test_users(self):
-        self.assert_list(['guest'], l('users'))
-        self.run_fail(['declare', 'user', 'name=foo'])
-        self.run_success(['declare', 'user', 'name=foo', 'password=pass', 'tags='])
-        self.assert_list(['foo', 'guest'], l('users'))
-        self.run_success(['delete', 'user', 'name=foo'])
-        self.assert_list(['guest'], l('users'))
-
-    def test_permissions(self):
-        self.run_success(['declare', 'vhost', 'name=foo'])
-        self.run_success(['declare', 'user', 'name=bar', 'password=pass', 'tags='])
-        self.assert_table([['guest', '/']], ['list', 'permissions', 'user', 'vhost'])
-        self.run_success(['declare', 'permission', 'user=bar', 'vhost=foo', 'configure=.*', 'write=.*', 'read=.*'])
-        self.assert_table([['guest', '/'], ['bar', 'foo']], ['list', 'permissions', 'user', 'vhost'])
-        self.run_success(['delete', 'user', 'name=bar'])
-        self.run_success(['delete', 'vhost', 'name=foo'])
-
-    def test_alt_vhost(self):
-        self.run_success(['declare', 'vhost', 'name=foo'])
-        self.run_success(['declare', 'permission', 'user=guest', 'vhost=foo', 'configure=.*', 'write=.*', 'read=.*'])
-        self.run_success(['declare', 'queue', 'name=in_/'])
-        self.run_success(['--vhost', 'foo', 'declare', 'queue', 'name=in_foo'])
-        self.assert_table([['/', 'in_/'], ['foo', 'in_foo']], ['list', 'queues', 'vhost', 'name'])
-        self.run_success(['--vhost', 'foo', 'delete', 'queue', 'name=in_foo'])
-        self.run_success(['delete', 'queue', 'name=in_/'])
-        self.run_success(['delete', 'vhost', 'name=foo'])
-
-    def test_exchanges(self):
-        self.run_success(['declare', 'exchange', 'name=foo', 'type=direct'])
-        self.assert_list(['', 'amq.direct', 'amq.fanout', 'amq.headers', 'amq.match', 'amq.rabbitmq.log', 'amq.rabbitmq.trace', 'amq.topic', 'foo'], l('exchanges'))
-        self.run_success(['delete', 'exchange', 'name=foo'])
-
-    def test_queues(self):
-        self.run_success(['declare', 'queue', 'name=foo'])
-        self.assert_list(['foo'], l('queues'))
-        self.run_success(['delete', 'queue', 'name=foo'])
-
-    def test_bindings(self):
-        self.run_success(['declare', 'queue', 'name=foo'])
-        self.run_success(['declare', 'binding', 'source=amq.direct', 'destination=foo', 'destination_type=queue', 'routing_key=test'])
-        self.assert_table([['', 'foo', 'queue', 'foo'], ['amq.direct', 'foo', 'queue', 'test']], ['list', 'bindings', 'source', 'destination', 'destination_type', 'routing_key'])
-        self.run_success(['delete', 'queue', 'name=foo'])
-
-    def test_policies(self):
-        self.run_success(['declare', 'policy', 'name=ha', 'pattern=.*', 'definition={"ha-mode":"all"}'])
-        self.assert_table([['ha', '/', '.*', '{"ha-mode": "all"}']], ['list', 'policies', 'name', 'vhost', 'pattern', 'definition'])
-        self.run_success(['delete', 'policy', 'name=ha'])
-
-    def test_parameters(self):
-        self.ctl(['eval', 'rabbit_runtime_parameters_test:register().'])
-        self.run_success(['declare', 'parameter', 'component=test', 'name=good', 'value=123'])
-        self.assert_table([['test', 'good', '/', '123']], ['list', 'parameters', 'component', 'name', 'vhost', 'value'])
-        self.run_success(['delete', 'parameter', 'component=test', 'name=good'])
-        self.ctl(['eval', 'rabbit_runtime_parameters_test:unregister().'])
-
-    def test_publish(self):
-        self.run_success(['declare', 'queue', 'name=test'])
-        self.run_success(['publish', 'routing_key=test', 'payload=test_1'])
-        self.run_success(['publish', 'routing_key=test', 'payload=test_2'])
-        self.run_success(['publish', 'routing_key=test'], stdin=b'test_3')
-        self.assert_table([exp_msg('test', 2, False, 'test_1')], ['get', 'queue=test', 'requeue=false'])
-        self.assert_table([exp_msg('test', 1, False, 'test_2')], ['get', 'queue=test', 'requeue=true'])
-        self.assert_table([exp_msg('test', 1, True,  'test_2')], ['get', 'queue=test', 'requeue=false'])
-        self.assert_table([exp_msg('test', 0, False, 'test_3')], ['get', 'queue=test', 'requeue=false'])
-        self.run_success(['publish', 'routing_key=test'], stdin=b'test_4')
-        filename = os.path.join(os.getenv("NODE_TMPDIR"), 'get.txt')
-        ensure_dir(filename)
-        self.run_success(['get', 'queue=test', 'requeue=false', 'payload_file=' + filename])
-        with open(filename) as f:
-            self.assertEqual('test_4', f.read())
-        os.remove(filename)
-        self.run_success(['delete', 'queue', 'name=test'])
-
-    def test_ignore_vhost(self):
-        self.run_success(['--vhost', '/', 'show', 'overview'])
-        self.run_success(['--vhost', '/', 'list', 'users'])
-        self.run_success(['--vhost', '/', 'list', 'vhosts'])
-        self.run_success(['--vhost', '/', 'list', 'nodes'])
-        self.run_success(['--vhost', '/', 'list', 'permissions'])
-        self.run_success(['--vhost', '/', 'declare', 'user', 'name=foo', 'password=pass', 'tags='])
-        self.run_success(['delete', 'user', 'name=foo'])
-
-    def test_sort(self):
-        self.run_success(['declare', 'queue', 'name=foo'])
-        self.run_success(['declare', 'binding', 'source=amq.direct', 'destination=foo', 'destination_type=queue', 'routing_key=bbb'])
-        self.run_success(['declare', 'binding', 'source=amq.topic', 'destination=foo', 'destination_type=queue', 'routing_key=aaa'])
-        self.assert_table([['', 'foo'], ['amq.direct', 'bbb'], ['amq.topic', 'aaa']], ['--sort', 'source', 'list', 'bindings', 'source', 'routing_key'])
-        self.assert_table([['amq.topic', 'aaa'], ['amq.direct', 'bbb'], ['', 'foo']], ['--sort', 'routing_key', 'list', 'bindings', 'source', 'routing_key'])
-        self.assert_table([['amq.topic', 'aaa'], ['amq.direct', 'bbb'], ['', 'foo']], ['--sort', 'source', '--sort-reverse', 'list', 'bindings', 'source', 'routing_key'])
-        self.run_success(['delete', 'queue', 'name=foo'])
-
-    # ---------------------------------------------------------------------------
-
-    def run_success(self, args, **kwargs):
-        (stdout, ret) = self.admin(args, **kwargs)
-        if ret != 0:
-            self.fail(stdout)
-
-    def run_fail(self, args):
-        (stdout, ret) = self.admin(args)
-        if ret == 0:
-            self.fail(stdout)
-
-    def assert_output(self, expected, args):
-        self.assertEqual(expected, self.admin(args)[0])
-
-    def assert_list(self, expected, args0):
-        args = ['-f', 'tsv', '-q']
-        args.extend(args0)
-        self.assertEqual(expected, self.admin(args)[0].splitlines())
-
-    def assert_table(self, expected, args0):
-        args = ['-f', 'tsv', '-q']
-        args.extend(args0)
-        self.assertEqual(expected, [l.split('\t') for l in self.admin(args)[0].splitlines()])
-
-    def admin(self, args0, stdin=None):
-        args = ['python{0}'.format(sys.version_info[0]),
-                norm(os.getenv('RABBITMQADMIN'))]
-        args.extend(args0)
-        return run(args, stdin)
-
-    def ctl(self, args0, stdin=None):
-        args = [norm(os.path.join(os.getenv('DEPS_DIR'),
-            'rabbit/scripts/rabbitmqctl')),
-            '-n', os.getenv('RABBITMQ_NODENAME')]
-        args.extend(args0)
-        (stdout, ret) = run(args, stdin)
-        if ret != 0:
-            self.fail(stdout)
-
-def norm(cmd):
-    return os.path.normpath(os.path.join(os.getcwd(), sys.argv[0], cmd))
-
-def run(args, stdin):
-    proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    (stdout, stderr) = proc.communicate(stdin)
-    returncode = proc.returncode
-    res = stdout.decode('utf-8') + stderr.decode('utf-8')
-    return (res, returncode)
-
-def l(thing):
-    return ['list', thing, 'name']
-
-def exp_msg(key, count, redelivered, payload):
-    # routing_key, exchange, message_count, payload, payload_bytes, payload_encoding, properties, redelivered
-    return [key, '', str(count), payload, str(len(payload)), 'string', '', str(redelivered)]
-
-def ensure_dir(f):
-    d = os.path.dirname(f)
-    if not os.path.exists(d):
-        os.makedirs(d)
-
-if __name__ == '__main__':
-    print("\nrabbitmqadmin tests\n===================\n")
-    suite = unittest.TestLoader().loadTestsFromTestCase(TestRabbitMQAdmin)
-    results = unittest.TextTestRunner(verbosity=2).run(suite)
-    if not results.wasSuccessful():
-        sys.exit(1)
diff --git a/rabbitmq-server/deps/rabbitmq_management/test/src/test-config b/rabbitmq-server/deps/rabbitmq_management/test/src/test-config
deleted file mode 100644 (file)
index 93322e7..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-# rabbitmqadmin.conf.example START
-
-[host_normal]
-hostname = localhost
-port = 15672
-username = guest
-password = guest
-declare_vhost = / # Used as default for declare / delete only
-vhost = /         # Used as default for declare / delete / list
-
-[default]
-hostname = localhost
-port = 99999
-username = guest
-password = guest
diff --git a/rabbitmq-server/deps/rabbitmq_management_agent/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_management_agent/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 6134b967113470ed23cbc067b5f1cd51b41eedb5..95545a3d0c23d6dcade4b0030a7bcc96a1802dc1 100644 (file)
@@ -16,6 +16,8 @@
 
 -module(rabbit_mgmt_db_handler).
 
+-include_lib("rabbit_common/include/rabbit.hrl").
+
 %% Make sure our database is hooked in *before* listening on the network or
 %% recovering queues (i.e. so there can't be any events fired before it starts).
 -rabbit_boot_step({rabbit_mgmt_db_handler,
@@ -93,8 +95,17 @@ init([]) ->
 handle_call(_Request, State) ->
     {ok, not_understood, State}.
 
+handle_event(#event{type = Type} = Event, State) when Type == channel_stats;
+                                                      Type == channel_created;
+                                                      Type == channel_closed ->
+    gen_server:cast({global, rabbit_mgmt_channel_stats_collector}, {event, Event}),
+    {ok, State};
+handle_event(#event{type = Type} = Event, State) when Type == queue_stats;
+                                                      Type == queue_deleted ->
+    gen_server:cast({global, rabbit_mgmt_queue_stats_collector}, {event, Event}),
+    {ok, State};
 handle_event(Event, State) ->
-    gen_server:cast({global, rabbit_mgmt_db}, {event, Event}),
+    gen_server:cast({global, rabbit_mgmt_event_collector}, {event, Event}),
     {ok, State}.
 
 handle_info(_Info, State) ->
index c21221aaa27306ee0d45ff5a041cf108f8ea196b..294cf6782559cd1309821916cfb3d2a5c29dd1d7 100644 (file)
                uptime, run_queue, processors, exchange_types,
                auth_mechanisms, applications, contexts,
                log_file, sasl_log_file, db_dir, config_files, net_ticktime,
-               enabled_plugins, persister_stats]).
+               enabled_plugins, persister_stats, gc_num, gc_bytes_reclaimed,
+               context_switches]).
 
 %%--------------------------------------------------------------------
 
--record(state, {fd_total, fhc_stats, fhc_stats_derived, node_owners}).
+-record(state, {
+    fd_total,
+    fhc_stats,
+    node_owners,
+    last_ts
+}).
 
 %%--------------------------------------------------------------------
 
@@ -196,7 +202,16 @@ i(auth_mechanisms, _State) ->
       fun (N) -> lists:member(list_to_atom(binary_to_list(N)), Mechanisms) end);
 i(applications,    _State) ->
     [format_application(A) ||
-        A <- lists:keysort(1, rabbit_misc:which_applications())].
+        A <- lists:keysort(1, rabbit_misc:which_applications())];
+i(gc_num, _State) ->
+    {GCs, _, _} = erlang:statistics(garbage_collection),
+    GCs;
+i(gc_bytes_reclaimed, _State) ->
+    {_, Words, _} = erlang:statistics(garbage_collection),
+    Words * erlang:system_info(wordsize);
+i(context_switches, _State) ->
+    {Sw, 0} = erlang:statistics(context_switches),
+    Sw.
 
 log_location(Type) ->
     case rabbit:log_location(Type) of
@@ -227,11 +242,8 @@ set_plugin_name(Name, Module) ->
     [{name, list_to_binary(atom_to_list(Name))} |
      proplists:delete(name, Module:description())].
 
-persister_stats(#state{fhc_stats         = FHC,
-                       fhc_stats_derived = FHCD}) ->
-    [{flatten_key(K), V} || {{_Op, Type} = K, V} <- FHC,
-                            Type =/= time] ++
-        [{flatten_key(K), V} || {K, V} <- FHCD].
+persister_stats(#state{fhc_stats = FHC}) ->
+    [{flatten_key(K), V} || {{_Op, _Type} = K, V} <- FHC].
 
 flatten_key({A, B}) ->
     list_to_atom(atom_to_list(A) ++ "_" ++ atom_to_list(B)).
@@ -345,7 +357,8 @@ code_change(_, State, _) -> {ok, State}.
 
 emit_update(State0) ->
     State = update_state(State0),
-    rabbit_event:notify(node_stats, infos(?KEYS, State)),
+    Stats = infos(?KEYS, State),
+    rabbit_event:notify(node_stats, Stats),
     erlang:send_after(?REFRESH_RATIO, self(), emit_update),
     emit_node_node_stats(State).
 
@@ -362,20 +375,8 @@ emit_node_node_stats(State = #state{node_owners = Owners}) ->
         {Node, _Owner, Stats} <- Links],
     State#state{node_owners = NewOwners}.
 
-update_state(State0 = #state{fhc_stats = FHC0}) ->
+update_state(State0) ->
+    %% Store raw data, the average operation time is calculated during querying
+    %% from the accumulated total
     FHC = file_handle_cache_stats:get(),
-    Avgs = [{{Op, avg_time}, avg_op_time(Op, V, FHC, FHC0)}
-            || {{Op, time}, V} <- FHC],
-    State0#state{fhc_stats         = FHC,
-                 fhc_stats_derived = Avgs}.
-
--define(MICRO_TO_MILLI, 1000).
-
-avg_op_time(Op, Time, FHC, FHC0) ->
-    Time0 = pget({Op, time}, FHC0),
-    TimeDelta = Time - Time0,
-    OpDelta = pget({Op, count}, FHC) - pget({Op, count}, FHC0),
-    case OpDelta of
-        0 -> 0;
-        _ -> (TimeDelta / OpDelta) / ?MICRO_TO_MILLI
-    end.
+    State0#state{fhc_stats = FHC}.
index 2493af71512cb9edcf8e67c28eca976aed3ae796..661f66e6d0ba38a436c14050557560504a8ec904 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_management_agent,
  [{description, "RabbitMQ Management Agent"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_mgmt_agent_app, []}},
diff --git a/rabbitmq-server/deps/rabbitmq_management_visualiser/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_management_visualiser/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 49e566fbb7c2b706bea879bbf2a517604d3aba55..d2df983ddfa2a1508ba9f7ade10a4d7c23f24cdc 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_management_visualiser,
  [{description, "RabbitMQ Visualiser"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {applications, [kernel, stdlib, rabbit, rabbitmq_management]}]}.
diff --git a/rabbitmq-server/deps/rabbitmq_mqtt/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_mqtt/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index 24abc42d45c164865474f148400fb9ebe788735e..7879f653ea9ca7ad6a9d687f2717f9157eee5e71 100644 (file)
@@ -2,7 +2,10 @@ PROJECT = rabbitmq_mqtt
 
 DEPS = amqp_client
 
-TEST_DEPS = rabbitmq_test rabbitmq_java_client
+TEST_DEPS = rabbit rabbitmq_java_client emqttc ct_helper
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
+
+dep_emqttc = git https://github.com/emqtt/emqttc.git master
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -13,26 +16,6 @@ ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
 ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
-include erlang.mk
 
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
-
-WITH_BROKER_TEST_MAKEVARS := \
-       RABBITMQ_CONFIG_FILE=$(TEST_TMPDIR)/etc/test
-WITH_BROKER_TEST_ENVVARS := \
-       SSL_CERTS_DIR=$(TEST_TMPDIR)/etc/certs
-WITH_BROKER_TEST_SCRIPTS := $(CURDIR)/test/test.sh
-WITH_BROKER_SETUP_SCRIPTS := $(CURDIR)/test/setup-rabbit-test.sh
-
-STANDALONE_TEST_COMMANDS := eunit:test(rabbit_mqtt_util)
-
-pre-standalone-tests:: test-tmpdir test-dist
-       $(verbose) rm -rf $(TEST_TMPDIR)/etc
-       $(exec_verbose) mkdir -p $(TEST_TMPDIR)/etc/certs
-       $(verbose) sed -E -e "s|%%CERTS_DIR%%|$(TEST_TMPDIR)/etc/certs|g" \
-               < test/src/test.config > $(TEST_TMPDIR)/etc/test.config
-       $(verbose) $(MAKE) -C $(DEPS_DIR)/rabbitmq_test/certs all PASSWORD=bunnychow \
-               DIR=$(TEST_TMPDIR)/etc/certs
-       $(verbose) cp test/src/rabbitmq_mqtt_standalone.app.src test/rabbitmq_mqtt.app
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
+include erlang.mk
index 327aacd4cb3911a4479225231687962091c38542..dbc99283e8953ca33b0a67b2cfd4e2117435ee1b 100644 (file)
 -record(state,      { socket,
                       conn_name,
                       await_recv,
+                      deferred_recv,
                       received_connect_frame,
                       connection_state,
                       keepalive,
                       keepalive_sup,
                       conserve,
                       parse_state,
-                      proc_state }).
+                      proc_state,
+                      connection,
+                      stats_timer }).
 
 %% processor state
 -record(proc_state, { socket,
@@ -42,6 +45,7 @@
                       channels,
                       connection,
                       exchange,
+                      adapter_info,
                       ssl_login_name,
                       %% Retained messages handler. See rabbit_mqtt_retainer_sup
                       %% and rabbit_mqtt_retainer.
index c80543dc95fe30f1c1b40078814cab99c9a94e6e..110199dfeeee3ce5dbbecedfa01ceaf32bbd9ee7 100644 (file)
 -define(QOS_1, 1).
 -define(QOS_2, 2).
 
--ifdef(use_specs).
-
 %% TODO
--type(message_id :: any()).
-
--type(mqtt_msg() :: #mqtt_msg {
-  retain :: boolean(),
-  qos :: QOS_0 | QOS_1 | QOS_2,
-  topic :: string(),
-  dup :: boolean(),
-  message_id :: message_id(),
-  payload :: binary()
-}).
-
--endif.
+-type message_id() :: any().
 
 -record(mqtt_frame, {fixed,
                      variable,
@@ -85,7 +72,8 @@
                               username,
                               password}).
 
--record(mqtt_frame_connack,  {return_code}).
+-record(mqtt_frame_connack,  {session_present,
+                              return_code}).
 
 -record(mqtt_frame_publish,  {topic_name,
                               message_id}).
 
 -record(mqtt_frame_other,    {other}).
 
--record(mqtt_msg,            {retain,
-                              qos,
-                              topic,
-                              dup,
-                              message_id,
-                              payload}).
+-record(mqtt_msg,            {retain :: boolean(),
+                              qos :: ?QOS_0 | ?QOS_1 | ?QOS_2,
+                              topic :: string(),
+                              dup :: boolean(),
+                              message_id :: message_id(),
+                              payload :: binary()}).
+
+-type mqtt_msg() :: #mqtt_msg{}.
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index c740248129cc98ff7ed3c6c98d277d67be78189c..a0ffec232618bf122ff65d52944c805f1d2032ba 100644 (file)
@@ -19,7 +19,7 @@
 -behaviour(supervisor2).
 -behaviour(ranch_protocol).
 
--define(MAX_WAIT, 16#ffffffff).
+-include_lib("rabbit_common/include/rabbit.hrl").
 
 -export([start_link/4, start_keepalive_link/0]).
 
@@ -38,7 +38,7 @@ start_link(Ref, Sock, _Transport, []) ->
                         SupPid,
                         {rabbit_mqtt_reader,
                          {rabbit_mqtt_reader, start_link, [KeepaliveSup, Ref, Sock]},
-                         intrinsic, ?MAX_WAIT, worker, [rabbit_mqtt_reader]}),
+                         intrinsic, ?WORKER_WAIT, worker, [rabbit_mqtt_reader]}),
     {ok, SupPid, ReaderPid}.
 
 start_keepalive_link() ->
index 3e9f57e0130474dcbbc04083a4e3c3d2b84fab4d..0b80925eb6fbfcffb9bd333fb3164b1e701843cb 100644 (file)
@@ -164,9 +164,10 @@ serialise_payload(undefined)           -> <<>>;
 serialise_payload(B) when is_binary(B) -> B.
 
 serialise_variable(#mqtt_frame_fixed   { type        = ?CONNACK } = Fixed,
-                   #mqtt_frame_connack { return_code = ReturnCode },
+                   #mqtt_frame_connack { session_present = SessionPresent,
+                                         return_code = ReturnCode },
                    <<>> = PayloadBin) ->
-    VariableBin = <<?RESERVED:8, ReturnCode:8>>,
+    VariableBin = <<?RESERVED:7, (opt(SessionPresent)):1, ReturnCode:8>>,
     serialise_fixed(Fixed, VariableBin, PayloadBin);
 
 serialise_variable(#mqtt_frame_fixed  { type       = SubAck } = Fixed,
index 3ebd1af03f86fb17194c6ede505656967099f5ca..2bb2b8ecfaecd3b80743920276294d9b6e9febbb 100644 (file)
@@ -16,7 +16,7 @@
 
 -module(rabbit_mqtt_processor).
 
--export([info/2, initial_state/2, initial_state/3,
+-export([info/2, initial_state/2, initial_state/4,
          process_frame/2, amqp_pub/2, amqp_callback/2, send_will/1,
          close_connection/1]).
 
         Frame = #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }}).
 
 initial_state(Socket, SSLLoginName) ->
-    initial_state(Socket, SSLLoginName, fun send_client/2).
-
-initial_state(Socket, SSLLoginName, SendFun) ->
+    initial_state(Socket, SSLLoginName,
+        adapter_info(Socket, 'MQTT'),
+        fun send_client/2).
+
+initial_state(Socket, SSLLoginName,
+              AdapterInfo0 = #amqp_adapter_info{additional_info = Extra},
+              SendFun) ->
+    %% MQTT connections use exactly one channel. The frame max is not
+    %% applicable and there is no way to know what client is used.
+    AdapterInfo = AdapterInfo0#amqp_adapter_info{additional_info = [
+        {channels, 1},
+        {channel_max, 1},
+        {frame_max, 0},
+        {client_properties,
+         [{<<"product">>, longstr, <<"MQTT client">>}]} | Extra]},
     #proc_state{ unacked_pubs   = gb_trees:empty(),
                  awaiting_ack   = gb_trees:empty(),
                  message_id     = 1,
@@ -43,6 +55,7 @@ initial_state(Socket, SSLLoginName, SendFun) ->
                  channels       = {undefined, undefined},
                  exchange       = rabbit_mqtt_util:env(exchange),
                  socket         = Socket,
+                 adapter_info   = AdapterInfo,
                  ssl_login_name = SSLLoginName,
                  send_fun       = SendFun }.
 
@@ -54,7 +67,10 @@ process_frame(#mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }},
     {error, connect_expected, PState};
 process_frame(Frame = #mqtt_frame{ fixed = #mqtt_frame_fixed{ type = Type }},
               PState) ->
-    process_request(Type, Frame, PState).
+    case process_request(Type, Frame, PState) of
+        {ok, PState1} -> {ok, PState1, PState1#proc_state.connection};
+        Ret -> Ret
+    end.
 
 process_request(?CONNECT,
                 #mqtt_frame{ variable = #mqtt_frame_connect{
@@ -70,7 +86,7 @@ process_request(?CONNECT,
                    []    -> rabbit_mqtt_util:gen_client_id();
                    [_|_] -> ClientId0
                end,
-    {ReturnCode, PState1} =
+    {Return, PState1} =
         case {lists:member(ProtoVersion, proplists:get_keys(?PROTOCOL_NAMES)),
               ClientId0 =:= [] andalso CleanSess =:= false} of
             {false, _} ->
@@ -97,23 +113,31 @@ process_request(?CONNECT,
                                 #'basic.qos_ok'{} = amqp_channel:call(
                                   Ch, #'basic.qos'{prefetch_count = Prefetch}),
                                 rabbit_mqtt_reader:start_keepalive(self(), Keepalive),
-                                {?CONNACK_ACCEPT,
-                                 maybe_clean_sess(
-                                   PState #proc_state{ will_msg   = make_will_msg(Var),
-                                                       clean_sess = CleanSess,
-                                                       channels   = {Ch, undefined},
-                                                       connection = Conn,
-                                                       client_id  = ClientId,
-                                                       retainer_pid = RetainerPid,
-                                                       auth_state = AState})};
+                                {SP, ProcState} =
+                                    maybe_clean_sess(
+                                        PState #proc_state{
+                                            will_msg   = make_will_msg(Var),
+                                            clean_sess = CleanSess,
+                                            channels   = {Ch, undefined},
+                                            connection = Conn,
+                                            client_id  = ClientId,
+                                            retainer_pid = RetainerPid,
+                                            auth_state = AState}),
+                                {{?CONNACK_ACCEPT, SP}, ProcState};
                             ConnAck ->
                                 {ConnAck, PState}
                         end
                 end
         end,
+    {ReturnCode, SessionPresent} = case Return of
+        {?CONNACK_ACCEPT, _} = Return -> Return;
+        Return                        -> {Return, false}
+    end,
     SendFun(#mqtt_frame{ fixed    = #mqtt_frame_fixed{ type = ?CONNACK},
                          variable = #mqtt_frame_connack{
-                                     return_code = ReturnCode }}, PState1),
+                                     session_present = SessionPresent,
+                                     return_code = ReturnCode}},
+            PState1),
     {ok, PState1};
 
 process_request(?PUBACK,
@@ -381,9 +405,12 @@ delivery_qos(Tag, Headers,   #proc_state{ consumer_tags = {_, Tag} }) ->
         undefined   -> {?QOS_1, ?QOS_1}
     end.
 
-maybe_clean_sess(PState = #proc_state { clean_sess = false }) ->
+maybe_clean_sess(PState = #proc_state { clean_sess = false,
+                                        channels   = {Channel, _},
+                                        client_id  = ClientId }) ->
     {_Queue, PState1} = ensure_queue(?QOS_1, PState),
-    PState1;
+    SessionPresent = session_present(Channel, ClientId),
+    {SessionPresent, PState1};
 maybe_clean_sess(PState = #proc_state { clean_sess = true,
                                         connection = Conn,
                                         client_id  = ClientId }) ->
@@ -394,7 +421,16 @@ maybe_clean_sess(PState = #proc_state { clean_sess = true,
     catch
         exit:_Error -> ok
     end,
-    PState.
+    {false, PState}.
+
+session_present(Channel, ClientId)  ->
+    {_, QueueQ1} = rabbit_mqtt_util:subcription_queue_name(ClientId),
+    Declare = #'queue.declare'{queue   = QueueQ1,
+                               passive = true},
+    case amqp_channel:call(Channel, Declare) of
+        #'queue.declare_ok'{} -> true;
+        _                     -> false
+    end.
 
 %%----------------------------------------------------------------------------
 
@@ -411,33 +447,31 @@ make_will_msg(#mqtt_frame_connect{ will_retain = Retain,
                payload = Msg }.
 
 process_login(UserBin, PassBin, ProtoVersion,
-              #proc_state{ channels  = {undefined, undefined},
-                           socket    = Sock }) ->
+              #proc_state{ channels     = {undefined, undefined},
+                           socket       = Sock,
+                           adapter_info = AdapterInfo }) ->
     {VHost, UsernameBin} = get_vhost_username(UserBin),
     case amqp_connection:start(#amqp_params_direct{
                                   username     = UsernameBin,
                                   password     = PassBin,
                                   virtual_host = VHost,
-                                  adapter_info = adapter_info(Sock, ProtoVersion)}) of
+                                  adapter_info = set_proto_version(AdapterInfo, ProtoVersion)}) of
         {ok, Connection} ->
             case rabbit_access_control:check_user_loopback(UsernameBin, Sock) of
                 ok          ->
-                  {ok, User} = rabbit_access_control:check_user_login(
-                                 UsernameBin,
-                                 case PassBin of
-                                   none -> [];
-                                   P -> [{password,P}]
-                                 end),
-                  {?CONNACK_ACCEPT, Connection, VHost, #auth_state{
-                                                         user = User,
-                                                         username = UsernameBin,
-                                                         vhost = VHost}};
-                not_allowed -> amqp_connection:close(Connection),
-                               rabbit_log:warning(
-                                 "MQTT login failed for ~p access_refused "
-                                 "(access must be from localhost)~n",
-                                 [binary_to_list(UsernameBin)]),
-                               ?CONNACK_AUTH
+                    [{internal_user, InternalUser}] = amqp_connection:info(
+                        Connection, [internal_user]),
+                    {?CONNACK_ACCEPT, Connection, VHost,
+                                      #auth_state{user = InternalUser,
+                                                  username = UsernameBin,
+                                                  vhost = VHost}};
+                not_allowed ->
+                    amqp_connection:close(Connection),
+                    rabbit_log:warning(
+                      "MQTT login failed for ~p access_refused "
+                      "(access must be from localhost)~n",
+                      [binary_to_list(UsernameBin)]),
+                    ?CONNACK_AUTH
             end;
         {error, {auth_failure, Explanation}} ->
             rabbit_log:error("MQTT login failed for ~p auth_failure: ~s~n",
@@ -604,9 +638,12 @@ amqp_pub(#mqtt_msg{ qos        = Qos,
     PState #proc_state{ unacked_pubs   = UnackedPubs1,
                         awaiting_seqno = SeqNo1 }.
 
-adapter_info(Sock, ProtoVer) ->
-    amqp_connection:socket_adapter_info(
-             Sock, {'MQTT', human_readable_mqtt_version(ProtoVer)}).
+adapter_info(Sock, ProtoName) ->
+    amqp_connection:socket_adapter_info(Sock, {ProtoName, "N/A"}).
+
+set_proto_version(AdapterInfo = #amqp_adapter_info{protocol = {Proto, _}}, Vsn) ->
+    AdapterInfo#amqp_adapter_info{protocol = {Proto,
+        human_readable_mqtt_version(Vsn)}}.
 
 human_readable_mqtt_version(3) ->
     "3.1.0";
index afc042cf91d716c2c5aa025915302b0a45bbb112..7df1a14788d6e0954a03aa547fec1e23973672e0 100644 (file)
@@ -45,7 +45,7 @@ start_link(KeepaliveSup, Ref, Sock) ->
 
     {ok, Pid}.
 
-conserve_resources(Pid, _, Conserve) ->
+conserve_resources(Pid, _, {_, Conserve, _}) ->
     Pid ! {conserve_resources, Conserve},
     ok.
 
@@ -61,7 +61,8 @@ init([KeepaliveSup, Ref, Sock]) ->
               self(), {?MODULE, conserve_resources, []}),
             ProcessorState = rabbit_mqtt_processor:initial_state(Sock,ssl_login_name(Sock)),
             gen_server2:enter_loop(?MODULE, [],
-             control_throttle(
+             rabbit_event:init_stats_timer(
+              control_throttle(
                #state{socket                 = Sock,
                       conn_name              = ConnStr,
                       await_recv             = false,
@@ -71,7 +72,7 @@ init([KeepaliveSup, Ref, Sock]) ->
                       keepalive_sup          = KeepaliveSup,
                       conserve               = false,
                       parse_state            = rabbit_mqtt_frame:initial_state(),
-                      proc_state             = ProcessorState }),
+                      proc_state             = ProcessorState }), #state.stats_timer),
              {backoff, 1000, 1000, 10000});
         {network_error, Reason} ->
             rabbit_net:fast_close(Sock),
@@ -118,7 +119,11 @@ handle_info({inet_reply, _Ref, ok}, State) ->
     {noreply, State, hibernate};
 
 handle_info({inet_async, Sock, _Ref, {ok, Data}},
-            State = #state{ socket = Sock }) ->
+            State = #state{ socket = Sock, connection_state = blocked }) ->
+    {noreply, State#state{ deferred_recv = Data }, hibernate};
+
+handle_info({inet_async, Sock, _Ref, {ok, Data}},
+            State = #state{ socket = Sock, connection_state = running }) ->
     process_received_bytes(
       Data, control_throttle(State #state{ await_recv = false }));
 
@@ -129,11 +134,12 @@ handle_info({inet_reply, _Sock, {error, Reason}}, State = #state {}) ->
     network_error(Reason, State);
 
 handle_info({conserve_resources, Conserve}, State) ->
-    {noreply, control_throttle(State #state{ conserve = Conserve }), hibernate};
+    maybe_process_deferred_recv(
+        control_throttle(State #state{ conserve = Conserve }));
 
 handle_info({bump_credit, Msg}, State) ->
     credit_flow:handle_bump_msg(Msg),
-    {noreply, control_throttle(State), hibernate};
+    maybe_process_deferred_recv(control_throttle(State));
 
 handle_info({start_keepalives, Keepalive},
             State = #state { keepalive_sup = KeepaliveSup, socket = Sock }) ->
@@ -150,49 +156,56 @@ handle_info(keepalive_timeout, State = #state {conn_name = ConnStr,
     log(error, "closing MQTT connection ~p (keepalive timeout)~n", [ConnStr]),
     send_will_and_terminate(PState, {shutdown, keepalive_timeout}, State);
 
+handle_info(emit_stats, State) ->
+    {noreply, emit_stats(State), hibernate};
+
 handle_info(Msg, State) ->
     {stop, {mqtt_unexpected_msg, Msg}, State}.
 
-terminate({network_error, {ssl_upgrade_error, closed}, ConnStr}, _State) ->
+terminate(Reason, State) ->
+    maybe_emit_stats(State),
+    do_terminate(Reason, State).
+
+do_terminate({network_error, {ssl_upgrade_error, closed}, ConnStr}, _State) ->
     log(error, "MQTT detected TLS upgrade error on ~s: connection closed~n",
        [ConnStr]);
 
-terminate({network_error,
+do_terminate({network_error,
            {ssl_upgrade_error,
             {tls_alert, "handshake failure"}}, ConnStr}, _State) ->
     log(error, "MQTT detected TLS upgrade error on ~s: handshake failure~n",
        [ConnStr]);
 
-terminate({network_error,
+do_terminate({network_error,
            {ssl_upgrade_error,
             {tls_alert, "unknown ca"}}, ConnStr}, _State) ->
     log(error, "MQTT detected TLS certificate verification error on ~s: alert 'unknown CA'~n",
        [ConnStr]);
 
-terminate({network_error,
+do_terminate({network_error,
            {ssl_upgrade_error,
             {tls_alert, Alert}}, ConnStr}, _State) ->
     log(error, "MQTT detected TLS upgrade error on ~s: alert ~s~n",
        [ConnStr, Alert]);
 
-terminate({network_error, {ssl_upgrade_error, Reason}, ConnStr}, _State) ->
+do_terminate({network_error, {ssl_upgrade_error, Reason}, ConnStr}, _State) ->
     log(error, "MQTT detected TLS upgrade error on ~s: ~p~n",
         [ConnStr, Reason]);
 
-terminate({network_error, Reason, ConnStr}, _State) ->
+do_terminate({network_error, Reason, ConnStr}, _State) ->
     log(error, "MQTT detected network error on ~s: ~p~n",
         [ConnStr, Reason]);
 
-terminate({network_error, Reason}, _State) ->
+do_terminate({network_error, Reason}, _State) ->
     log(error, "MQTT detected network error: ~p~n", [Reason]);
 
-terminate(normal, #state{proc_state = ProcState,
+do_terminate(normal, #state{proc_state = ProcState,
                          conn_name  = ConnName}) ->
     rabbit_mqtt_processor:close_connection(ProcState),
     log(info, "closing MQTT connection ~p (~s)~n", [self(), ConnName]),
     ok;
 
-terminate(_Reason, #state{proc_state = ProcState}) ->
+do_terminate(_Reason, #state{proc_state = ProcState}) ->
     rabbit_mqtt_processor:close_connection(ProcState),
     ok.
 
@@ -222,9 +235,9 @@ process_received_bytes(<<>>, State = #state{proc_state = ProcState,
         undefined -> ok;
         _         -> log_new_connection(State)
     end,
-    {noreply, State#state{ received_connect_frame = true }, hibernate};
+    {noreply, ensure_stats_timer(State#state{ received_connect_frame = true }), hibernate};
 process_received_bytes(<<>>, State) ->
-    {noreply, State, hibernate};
+    {noreply, ensure_stats_timer(State), hibernate};
 process_received_bytes(Bytes,
                        State = #state{ parse_state = ParseState,
                                        proc_state  = ProcState,
@@ -232,16 +245,17 @@ process_received_bytes(Bytes,
     case rabbit_mqtt_frame:parse(Bytes, ParseState) of
         {more, ParseState1} ->
             {noreply,
-             control_throttle( State #state{ parse_state = ParseState1 }),
+             ensure_stats_timer(control_throttle( State #state{ parse_state = ParseState1 })),
              hibernate};
         {ok, Frame, Rest} ->
             case rabbit_mqtt_processor:process_frame(Frame, ProcState) of
-                {ok, ProcState1} ->
+                {ok, ProcState1, ConnPid} ->
                     PS = rabbit_mqtt_frame:initial_state(),
                     process_received_bytes(
                       Rest,
                       State #state{ parse_state = PS,
-                                    proc_state = ProcState1 });
+                                    proc_state = ProcState1,
+                                    connection = ConnPid });
                 {error, Reason, ProcState1} ->
                     log(info, "MQTT protocol error ~p for connection ~p~n",
                         [Reason, ConnStr]),
@@ -286,10 +300,10 @@ network_error(closed,
               State = #state{ conn_name  = ConnStr,
                               proc_state = PState }) ->
     MqttConn = PState#proc_state.connection,
-    log(case MqttConn of 
+    log(case MqttConn of
             undefined  -> debug;
             _          -> info
-        end, 
+        end,
         "MQTT detected network error for ~p: peer closed TCP connection~n",
         [ConnStr]),
     send_will_and_terminate(PState, State);
@@ -302,6 +316,8 @@ network_error(Reason,
 
 run_socket(State = #state{ connection_state = blocked }) ->
     State;
+run_socket(State = #state{ deferred_recv = Data }) when Data =/= undefined ->
+    State;
 run_socket(State = #state{ await_recv = true }) ->
     State;
 run_socket(State = #state{ socket = Sock }) ->
@@ -320,3 +336,27 @@ control_throttle(State = #state{ connection_state = Flow,
                                                 connection_state = running });
         {_,            _} -> run_socket(State)
     end.
+
+maybe_process_deferred_recv(State = #state{ deferred_recv = undefined }) ->
+    {noreply, State, hibernate};
+maybe_process_deferred_recv(State = #state{ deferred_recv = Data, socket = Sock }) ->
+    handle_info({inet_async, Sock, noref, {ok, Data}},
+                State#state{ deferred_recv = undefined }).
+
+maybe_emit_stats(State) ->
+    rabbit_event:if_enabled(State, #state.stats_timer,
+                            fun() -> emit_stats(State) end).
+
+emit_stats(State=#state{socket=Sock, connection_state=ConnState, connection=Conn}) ->
+    SockInfos = case rabbit_net:getstat(Sock,
+            [recv_oct, recv_cnt, send_oct, send_cnt, send_pend]) of
+        {ok,    SI} -> SI;
+        {error,  _} -> []
+    end,
+    Infos = [{pid, Conn}, {state, ConnState}|SockInfos],
+    rabbit_event:notify(connection_stats, Infos),
+    State1 = rabbit_event:reset_stats_timer(State, #state.stats_timer),
+    ensure_stats_timer(State1).
+
+ensure_stats_timer(State = #state{}) ->
+    rabbit_event:ensure_stats_timer(State, #state.stats_timer, emit_stats).
index 6411d2265531bb14529a9eb4b45854acc10aab8d..665e6078684dcf6cad5d1906670344b5bc9013fd 100644 (file)
 -record(retainer_state, {store_mod,
                          store}).
 
--ifdef(use_specs).
-
--spec(retain/3 :: (pid(), string(), mqtt_msg()) ->
+-spec retain(pid(), string(), mqtt_msg()) ->
     {noreply, NewState :: term()} |
     {noreply, NewState :: term(), timeout() | hibernate} |
-    {stop, Reason :: term(), NewState :: term()}).
-
--endif.
+    {stop, Reason :: term(), NewState :: term()}.
 
 %%----------------------------------------------------------------------------
 
index 221d0032714d6a22572d99404deead531ab2699a..17ee6d2a5c9aac5b3b2247395851b2724a8873ad 100644 (file)
 
 -define(ENCODING, utf8).
 
--ifdef(use_specs).
--spec(start_child/1 :: (binary()) -> supervisor2:startchild_ret()).
--spec(start_child/2 :: (term(), binary()) -> supervisor2:startchild_ret()).
--endif.
+-spec start_child(binary()) -> supervisor2:startchild_ret().
+-spec start_child(term(), binary()) -> supervisor2:startchild_ret().
 
 start_link(SupName) ->
   supervisor2:start_link(SupName, ?MODULE, []).
index f7edb48cd0b359ee790ed16f581efeea07898b64..6a43cb583ca07a2e8c069b7ba0eb3e07ba6dbfeb 100644 (file)
@@ -17,7 +17,7 @@
 -module(rabbit_mqtt_sup).
 -behaviour(supervisor2).
 
--define(MAX_WAIT, 16#ffffffff).
+-include_lib("rabbit_common/include/rabbit.hrl").
 
 -export([start_link/2, init/1]).
 
@@ -40,10 +40,10 @@ init([{Listeners, SslListeners0}]) ->
     {ok, {{one_for_all, 10, 10},
           [{collector,
             {rabbit_mqtt_collector, start_link, []},
-            transient, ?MAX_WAIT, worker, [rabbit_mqtt_collector]},
+            transient, ?WORKER_WAIT, worker, [rabbit_mqtt_collector]},
            {rabbit_mqtt_retainer_sup,
             {rabbit_mqtt_retainer_sup, start_link, [{local, rabbit_mqtt_retainer_sup}]},
-             transient, ?MAX_WAIT, supervisor, [rabbit_mqtt_retainer_sup]} |
+             transient, ?SUPERVISOR_WAIT, supervisor, [rabbit_mqtt_retainer_sup]} |
            listener_specs(fun tcp_listener_spec/1,
                           [SocketOpts, NumTcpAcceptors], Listeners) ++
            listener_specs(fun ssl_listener_spec/1,
index 4700dced46cd9ce7db055cd0df943482703e5f3b..560a533acad38f4a6438e4858889d0eba2196057 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_mqtt,
  [{description, "RabbitMQ MQTT Adapter"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_mqtt, []}},
index 68cee9d2d43c8af0208f8d3704c2088d152e039f..8b37c4b39ff3a06cf46a0b986a6195e11991de47 100644 (file)
@@ -1,4 +1,4 @@
-UPSTREAM_GIT=https://git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.java.git
+UPSTREAM_GIT=https://github.com/eclipse/paho.mqtt.java
 REVISION=00b5b2f99ae8410b7d96d106e080a092c5f92546
 
 JC=javac
@@ -22,7 +22,13 @@ endef
 
 .PHONY: test
 test: build_java_amqp
-       ant test -Dhostname=$(HOSTNAME) -Dcerts.dir=$(SSL_CERTS_DIR)
+       ant test \
+               -Dhostname=$(HOSTNAME) \
+               -Dcerts.dir=$(SSL_CERTS_DIR) \
+               -Dmqtt.ssl.port=$(MQTT_SSL_PORT) \
+               -Dmqtt.port=$(MQTT_PORT) \
+               -Damqp.port=$(AMQP_PORT) \
+               -Dbuild.out=$(DATA_DIR)/build
 
 clean:
        ant clean
index 25da28ce62f674bc5bc48f040e536464b29959f2..b6ba8e8fbecf490246b5ae9819b374d3be171001 100644 (file)
@@ -10,6 +10,9 @@ server.keystore=${test.resources}/server.jks
 server.cert=${certs.dir}/server/cert.pem
 ca.cert=${certs.dir}/testca/cacert.pem
 server.keystore.phrase=bunnyhop
+amqp.port=5672
+mqtt.port=1883
+mqtt.ssl.port=8883
 
 client.keystore=${test.resources}/client.jks
 client.keystore.phrase=bunnychow
index cf73be5a9ac1cb6efcd2ea03ee877246c7285025..c3eb7a6f0fb9dd02555f2a03b1bfcf6572337674 100644 (file)
@@ -36,6 +36,7 @@
 
   <target name="detect-ssl">
     <available property="SSL_AVAILABLE" file="${certs.dir}/client"/>
+    <echo message="certsdir:${certs.dir}" />
     <property name="CLIENT_KEYSTORE_PHRASE" value="bunnies"/>
     <property name="SSL_P12_PASSWORD" value="${certs.password}"/>
   </target>
         <pathelement path="${test.resources}"/>
       </classpath>
       <jvmarg value="-Dhostname=${hostname}"/>
+      <jvmarg value="-Dmqtt.ssl.port=${mqtt.ssl.port}"/>
       <jvmarg value="-Dserver.keystore.passwd=${server.keystore.phrase}"/>
       <jvmarg value="-Dclient.keystore.passwd=${client.keystore.phrase}"/>
       <formatter type="plain"/>
         haltOnFailure="true"
         failureproperty="test.failure"
         fork="yes">
-        <classpath>
-          <path refid="test.javac.classpath"/>
-          <pathelement path="${test.javac.out}"/>
-        </classpath>
+      <classpath>
+        <path refid="test.javac.classpath"/>
+        <pathelement path="${test.javac.out}"/>
+      </classpath>
+      <jvmarg value="-Damqp.port=${amqp.port}"/>
+      <jvmarg value="-Dmqtt.port=${mqtt.port}"/>
+      <jvmarg value="-Dhostname=${hostname}"/>
 
       <formatter type="plain"/>
       <formatter type="xml"/>
diff --git a/rabbitmq-server/deps/rabbitmq_mqtt/test/java_SUITE.erl b/rabbitmq-server/deps/rabbitmq_mqtt/test/java_SUITE.erl
new file mode 100644 (file)
index 0000000..eb011fc
--- /dev/null
@@ -0,0 +1,121 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(java_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-define(BASE_CONF_RABBIT, {rabbit, [{ssl_options, [{fail_if_no_peer_cert, false}]}]}).
+-define(BASE_CONF_MQTT,
+        {rabbitmq_mqtt, [
+           {ssl_cert_login,   true},
+           {allow_anonymous,  true},
+           {tcp_listeners,    []},
+           {ssl_listeners,    []}
+           ]}).
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                java
+                               ]}
+    ].
+
+suite() ->
+    [{timetrap, {seconds, 600}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+merge_app_env(Config) ->
+    {ok, Ssl} = q(Config, [erlang_node_config, rabbit, ssl_options]),
+    Ssl1 = lists:keyreplace(fail_if_no_peer_cert, 1, Ssl, {fail_if_no_peer_cert, false}),
+    Config1 = rabbit_ct_helpers:merge_app_env(Config, {rabbit, [{ssl_options, Ssl1}]}),
+    rabbit_ct_helpers:merge_app_env(Config1, ?BASE_CONF_MQTT).
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE},
+        {rmq_certspwd, "bunnychow"}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      [ fun merge_app_env/1 ] ++
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    Hostname = re:replace(os:cmd("hostname"), "\\s+", "", [global,{return,list}]),
+    User = "O=client,CN=" ++ Hostname,
+    {ok,_} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["add_user", User, ""]),
+    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["set_permissions",  "-p", "/", User, ".*", ".*", ".*"]),
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+java(Config) ->
+    CertsDir = rabbit_ct_helpers:get_config(Config, rmq_certsdir),
+    MqttPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+    MqttSslPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt_tls),
+    AmqpPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+    DataDir = rabbit_ct_helpers:get_config(Config, priv_dir),
+    os:putenv("DATA_DIR", DataDir),
+    os:putenv("SSL_CERTS_DIR", CertsDir),
+    os:putenv("MQTT_SSL_PORT", erlang:integer_to_list(MqttSslPort)),
+    os:putenv("MQTT_PORT", erlang:integer_to_list(MqttPort)),
+    os:putenv("AMQP_PORT", erlang:integer_to_list(AmqpPort)),
+    {ok, _} = rabbit_ct_helpers:make(Config, make_dir(), ["test"]).
+
+
+make_dir() ->
+    {Src, _} = filename:find_src(?MODULE),
+    filename:dirname(Src).
+
+rpc(Config, M, F, A) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0, M, F, A).
+
+q(P, [K | Rem]) ->
+    case proplists:get_value(K, P) of
+        undefined -> undefined;
+        V -> q(V, Rem)
+    end;
+q(P, []) -> {ok, P}.
+
diff --git a/rabbitmq-server/deps/rabbitmq_mqtt/test/processor_SUITE.erl b/rabbitmq-server/deps/rabbitmq_mqtt/test/processor_SUITE.erl
new file mode 100644 (file)
index 0000000..fe05811
--- /dev/null
@@ -0,0 +1,60 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+
+
+-module(processor_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                ignores_colons_in_username_if_option_set,
+                                interprets_colons_in_username_if_option_not_set
+                               ]}
+    ].
+
+suite() ->
+    [{timetrap, {seconds, 60}}].
+
+init_per_suite(Config) ->
+    ok = application:load(rabbitmq_mqtt),
+    Config.
+end_per_suite(Config) ->
+    ok = application:unload(rabbitmq_mqtt),
+    Config.
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+init_per_testcase(_, Config) -> Config.
+end_per_testcase(_, Config) -> Config.
+
+ignore_colons(B) -> application:set_env(rabbitmq_mqtt, ignore_colons_in_username, B).
+
+ignores_colons_in_username_if_option_set(_Config) ->
+    ignore_colons(true),
+    ?assertEqual({rabbit_mqtt_util:env(vhost), <<"a:b:c">>},
+                  rabbit_mqtt_processor:get_vhost_username(<<"a:b:c">>)).
+
+interprets_colons_in_username_if_option_not_set(_Config) ->
+   ignore_colons(false),
+   ?assertEqual({<<"a:b">>, <<"c">>},
+                 rabbit_mqtt_processor:get_vhost_username(<<"a:b:c">>)).
diff --git a/rabbitmq-server/deps/rabbitmq_mqtt/test/rabbit-test.sh b/rabbitmq-server/deps/rabbitmq_mqtt/test/rabbit-test.sh
deleted file mode 100755 (executable)
index b0c6585..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-CTL=$1
-USER="O=client,CN=$(hostname)"
-
-$CTL add_user "$USER" ''
-$CTL set_permissions -p / "$USER" ".*" ".*" ".*"
diff --git a/rabbitmq-server/deps/rabbitmq_mqtt/test/reader_SUITE.erl b/rabbitmq-server/deps/rabbitmq_mqtt/test/reader_SUITE.erl
new file mode 100644 (file)
index 0000000..5366da6
--- /dev/null
@@ -0,0 +1,136 @@
+-module(reader_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                block
+                               ]}
+    ].
+
+suite() ->
+    [{timetrap, {seconds, 60}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+mqtt_config(Config) ->
+    P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt_extra),
+    P2 = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt_tls_extra),
+    {rabbitmq_mqtt, [
+       {ssl_cert_login,   true},
+       {allow_anonymous,  true},
+       {tcp_listeners,    [P]},
+       {ssl_listeners,    [P2]}
+       ]}.
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE},
+        {rmq_extra_tcp_ports, [tcp_port_mqtt_extra,
+                               tcp_port_mqtt_tls_extra]}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+block(Config) ->
+    P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt),
+    % ok = rpc(Config, ?MODULE, change_configuration, [mqtt_config(Config)]),
+    {ok, C} = emqttc:start_link([{host, "localhost"},
+                                 {port, P},
+                                 {client_id, <<"simpleClient">>},
+                                 {proto_ver, 3},
+                                 {logger, info},
+                                 {puback_timeout, 1}]),
+    %% Only here to ensure the connection is really up
+    emqttc:subscribe(C, <<"TopicA">>, qos0),
+    emqttc:publish(C, <<"TopicA">>, <<"Payload">>),
+    expect_publishes(<<"TopicA">>, [<<"Payload">>]),
+    emqttc:unsubscribe(C, [<<"TopicA">>]),
+
+    emqttc:subscribe(C, <<"Topic1">>, qos0),
+
+    %% Not blocked
+    {ok, _} = emqttc:sync_publish(C, <<"Topic1">>, <<"Not blocked yet">>,
+                                  [{qos, 1}]),
+
+    ok = rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.00000001]),
+    ok = rpc(Config, rabbit_alarm, set_alarm, [{{resource_limit, memory, node()}, []}]),
+
+    %% Let it block
+    timer:sleep(100),
+    %% Blocked, but still will publish
+    {error, ack_timeout} = emqttc:sync_publish(C, <<"Topic1">>, <<"Now blocked">>,
+                                  [{qos, 1}]),
+
+    %% Blocked
+    {error, ack_timeout} = emqttc:sync_publish(C, <<"Topic1">>,
+                                               <<"Blocked">>, [{qos, 1}]),
+
+    rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.4]),
+    rpc(Config, rabbit_alarm, clear_alarm, [{resource_limit, memory, node()}]),
+
+    %% Let alarms clear
+    timer:sleep(1000),
+
+    expect_publishes(<<"Topic1">>, [<<"Not blocked yet">>,
+                                    <<"Now blocked">>,
+                                    <<"Blocked">>]),
+
+    emqttc:disconnect(C).
+
+expect_publishes(_Topic, []) -> ok;
+expect_publishes(Topic, [Payload|Rest]) ->
+    receive
+        {publish, Topic, Payload} -> expect_publishes(Topic, Rest)
+        after 500 ->
+            throw({publish_not_delivered, Payload})
+    end.
+
+rpc(Config, M, F, A) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0, M, F, A).
+
+change_configuration({App, Args}) ->
+    ok = application:stop(App),
+    ok = change_cfg(App, Args),
+    application:start(App).
+
+change_cfg(_, []) ->
+    ok;
+change_cfg(App, [{Name,Value}|Rest]) ->
+    ok = application:set_env(App, Name, Value),
+    change_cfg(App, Rest).
+
diff --git a/rabbitmq-server/deps/rabbitmq_mqtt/test/setup-rabbit-test.sh b/rabbitmq-server/deps/rabbitmq_mqtt/test/setup-rabbit-test.sh
deleted file mode 100755 (executable)
index 12462c0..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh -e
-sh -e `dirname $0`/rabbit-test.sh "$RABBITMQCTL -n $RABBITMQ_NODENAME"
index 7eded18898fc108a847933c080de03090df13a35..58d2e8e69a88afc8e5c82da537e10303a55517a2 100644 (file)
@@ -53,8 +53,7 @@ import java.util.concurrent.TimeoutException;
 public class MqttTest extends TestCase implements MqttCallback {
 
     private final String host = "localhost";
-    private final int port = 1883;
-       private final String brokerUrl = "tcp://" + host + ":" + port;
+    private final String brokerUrl = "tcp://" + host + ":" + getPort();
     private String clientId;
     private String clientId2;
     private MqttClient client;
@@ -68,10 +67,28 @@ public class MqttTest extends TestCase implements MqttCallback {
     private int testDelay = 2000;
     private long lastReceipt;
     private boolean expectConnectionFailure;
+    private boolean failOnDelivery = false;
 
     private Connection conn;
     private Channel ch;
 
+    private static int getPort() {
+        Object port = System.getProperty("mqtt.port");
+        assertNotNull(port);
+        return Integer.parseInt(port.toString());
+    }
+
+    private static int getAmqpPort() {
+        Object port = System.getProperty("amqp.port");
+        assertNotNull(port);
+        return Integer.parseInt(port.toString());
+    }
+
+    private static String getHost() {
+        Object host = System.getProperty("hostname");
+        assertNotNull(host);
+        return host.toString();
+    }
     // override 10s limit
     private class MyConnOpts extends MqttConnectOptions {
         private int keepAliveInterval = 60;
@@ -98,7 +115,7 @@ public class MqttTest extends TestCase implements MqttCallback {
     }
 
     @Override
-    public  void tearDown() throws MqttException {
+    public void tearDown() throws MqttException {
         // clean any sticky sessions
         setConOpts(conOpt);
         client = new MqttClient(brokerUrl, clientId, null);
@@ -115,8 +132,10 @@ public class MqttTest extends TestCase implements MqttCallback {
     }
 
     private void setUpAmqp() throws IOException, TimeoutException {
+        int port = getAmqpPort();
         ConnectionFactory cf = new ConnectionFactory();
         cf.setHost(host);
+        cf.setPort(port);
         conn = cf.newConnection();
         ch = conn.createChannel();
     }
@@ -136,7 +155,7 @@ public class MqttTest extends TestCase implements MqttCallback {
     }
 
     public void testConnectFirst() throws MqttException, IOException, InterruptedException {
-        NetworkModule networkModule = new TCPNetworkModule(SocketFactory.getDefault(), host, port, "");
+        NetworkModule networkModule = new TCPNetworkModule(SocketFactory.getDefault(), host, getPort(), "");
         networkModule.start();
         MqttInputStream  mqttIn  = new MqttInputStream (networkModule.getInputStream());
         MqttOutputStream mqttOut = new MqttOutputStream(networkModule.getOutputStream());
@@ -384,6 +403,52 @@ public class MqttTest extends TestCase implements MqttCallback {
         client.disconnect();
     }
 
+    public void  testSessionRedelivery() throws MqttException, InterruptedException {
+        conOpt.setCleanSession(false);
+        client.connect(conOpt);
+        client.subscribe(topic, 1);
+        client.disconnect();
+
+        client2.connect(conOpt);
+        publish(client2, topic, 1, payload);
+        client2.disconnect();
+
+        failOnDelivery = true;
+
+        // Connection should fail. Messages will be redelivered.
+        client.setCallback(this);
+        client.connect(conOpt);
+
+        Thread.sleep(testDelay);
+        // Message has been delivered but connection has failed.
+        Assert.assertEquals(1, receivedMessages.size());
+        Assert.assertEquals(true, Arrays.equals(receivedMessages.get(0).getPayload(), payload));
+
+        Assert.assertFalse(client.isConnected());
+
+        receivedMessages.clear();
+        failOnDelivery = false;
+
+        client.setCallback(this);
+        client.connect(conOpt);
+
+        Thread.sleep(testDelay);
+        // Message has been redelivered after session resume
+        Assert.assertEquals(1, receivedMessages.size());
+        Assert.assertEquals(true, Arrays.equals(receivedMessages.get(0).getPayload(), payload));
+        Assert.assertTrue(client.isConnected());
+        client.disconnect();
+
+        receivedMessages.clear();
+
+        client.setCallback(this);
+        client.connect(conOpt);
+
+        Thread.sleep(testDelay);
+        // This time messages are acknowledged and won't be redelivered
+        Assert.assertEquals(0, receivedMessages.size());
+    }
+
     public void testCleanSession() throws MqttException, InterruptedException {
         conOpt.setCleanSession(false);
         client.connect(conOpt);
@@ -567,6 +632,9 @@ public class MqttTest extends TestCase implements MqttCallback {
     public void messageArrived(String topic, MqttMessage message) throws Exception {
         lastReceipt = System.currentTimeMillis();
         receivedMessages.add(message);
+        if(failOnDelivery){
+            throw new Exception("failOnDelivery");
+        }
     }
 
     public void deliveryComplete(IMqttDeliveryToken token) {
index c39dc0cb6a8f1c302a03fd15e57e971ae9390db5..b1ee72f001904fb696bd77eac8f64e328dcd7d86 100644 (file)
@@ -36,8 +36,7 @@ import java.util.ArrayList;
 
 public class MqttSSLTest extends TestCase implements MqttCallback {
 
-    private final int port = 8883;
-    private final String brokerUrl = "ssl://" + getHost() + ":" + port;
+    private final String brokerUrl = "ssl://" + getHost() + ":" + getPort();
     private String clientId;
     private String clientId2;
     private MqttClient client;
@@ -48,8 +47,13 @@ public class MqttSSLTest extends TestCase implements MqttCallback {
     private long lastReceipt;
     private boolean expectConnectionFailure;
 
+    private static String getPort() {
+        Object port = System.getProperty("mqtt.ssl.port");
+        assertNotNull(port);
+        return port.toString();
+    }
 
-    private static final String getHost() {
+    private static String getHost() {
         Object host = System.getProperty("hostname");
         assertNotNull(host);
         return host.toString();
diff --git a/rabbitmq-server/deps/rabbitmq_mqtt/test/src/rabbit_mqtt_processor_tests.erl b/rabbitmq-server/deps/rabbitmq_mqtt/test/src/rabbit_mqtt_processor_tests.erl
deleted file mode 100644 (file)
index 53f0c4f..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
--module(rabbit_mqtt_processor_tests).
-
--include_lib("eunit/include/eunit.hrl").
-
-s(X) -> application:set_env(rabbitmq_mqtt, ignore_colons_in_username, X).
-
-get_vhost_username_test_() ->
-    {foreach,
-     fun () -> application:load(rabbitmq_mqtt) end,
-     fun (_) -> s(false) end,
-     [{"ignores colons in username if option set",
-       fun () ->
-               s(true),
-               ?assertEqual({rabbit_mqtt_util:env(vhost), <<"a:b:c">>},
-                            rabbit_mqtt_processor:get_vhost_username(<<"a:b:c">>))
-       end},
-      {"interprets colons in username if option not set",
-       fun() ->
-               ?assertEqual({<<"a:b">>, <<"c">>},
-                            rabbit_mqtt_processor:get_vhost_username(<<"a:b:c">>))
-       end}]}.
diff --git a/rabbitmq-server/deps/rabbitmq_mqtt/test/test.sh b/rabbitmq-server/deps/rabbitmq_mqtt/test/test.sh
deleted file mode 100755 (executable)
index ae60a49..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-$MAKE -C `dirname $0` build_java_amqp
-$MAKE -C `dirname $0` test
similarity index 52%
rename from rabbitmq-server/deps/rabbitmq_mqtt/test/src/rabbit_mqtt_util_tests.erl
rename to rabbitmq-server/deps/rabbitmq_mqtt/test/util_SUITE.erl
index 67ef5a1130d80a6759a641be52f3c2f36076721f..7f68f07596dc228643a0076312e2f82af1ef0ead 100644 (file)
 %%
 %% The Initial Developer of the Original Code is GoPivotal, Inc.
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
 
--module(rabbit_mqtt_util_tests).
+-module(util_SUITE).
+-compile([export_all]).
 
+-include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 
-all_test_() ->
-    {setup,
-     fun setup/0,
-     [fun coerce_exchange/0,
-      fun coerce_vhost/0,
-      fun coerce_default_user/0,
-      fun coerce_default_pass/0]}.
-
-setup() ->
-    application:load(rabbitmq_mqtt).
-
-coerce_exchange() ->
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                coerce_exchange,
+                                coerce_vhost,
+                                coerce_default_user,
+                                coerce_default_pass
+                               ]}
+    ].
+
+suite() ->
+    [{timetrap, {seconds, 60}}].
+
+init_per_suite(Config) ->
+    ok = application:load(rabbitmq_mqtt),
+    Config.
+end_per_suite(Config) ->
+    ok = application:unload(rabbitmq_mqtt),
+    Config.
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+init_per_testcase(_, Config) -> Config.
+end_per_testcase(_, Config) -> Config.
+
+coerce_exchange(_) ->
     ?assertEqual(<<"amq.topic">>, rabbit_mqtt_util:env(exchange)).
 
-coerce_vhost() ->
+coerce_vhost(_) ->
     ?assertEqual(<<"/">>, rabbit_mqtt_util:env(vhost)).
 
-coerce_default_user() ->
+coerce_default_user(_) ->
     ?assertEqual(<<"guest_user">>, rabbit_mqtt_util:env(default_user)).
 
-coerce_default_pass() ->
+coerce_default_pass(_) ->
     ?assertEqual(<<"guest_pass">>, rabbit_mqtt_util:env(default_pass)).
diff --git a/rabbitmq-server/deps/rabbitmq_recent_history_exchange/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_recent_history_exchange/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/rabbitmq-server/deps/rabbitmq_recent_history_exchange/CONTRIBUTING.md b/rabbitmq-server/deps/rabbitmq_recent_history_exchange/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..45bbcbe
--- /dev/null
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests is the primary place of discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 2e4f74a883fce5f9f6b22c728b169f2300b8f938..81e8867a819811b56bb3cff419133fb02697cfd3 100644 (file)
@@ -1,6 +1,6 @@
 PROJECT = rabbitmq_recent_history_exchange
 
-TEST_DEPS = amqp_client
+TEST_DEPS = amqp_client rabbit
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -12,12 +12,3 @@ ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
 include erlang.mk
-
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
-
-WITH_BROKER_TEST_MAKEVARS := \
-        RABBITMQ_CONFIG_FILE=$(CURDIR)/etc/rabbit-test
-WITH_BROKER_TEST_COMMANDS := \
-       rabbit_exchange_type_recent_history_test:test()
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
diff --git a/rabbitmq-server/deps/rabbitmq_recent_history_exchange/test/src/rabbit_exchange_type_recent_history_test_util.erl b/rabbitmq-server/deps/rabbitmq_recent_history_exchange/test/src/rabbit_exchange_type_recent_history_test_util.erl
deleted file mode 100644 (file)
index 37a28da..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ Federation.
-%%
-%% The Initial Developer of the Original Code is VMware, Inc.
-%% Copyright (c) 2007-2013 VMware, Inc.  All rights reserved.
-%%
-
--module(rabbit_exchange_type_recent_history_test_util).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("amqp_client/include/amqp_client.hrl").
-
--compile(export_all).
-
-start_other_node({Name, Port}) ->
-    start_other_node({Name, Port}, "rabbit-" ++ Name).
-
-start_other_node({Name, Port}, Config) ->
-    start_other_node({Name, Port}, Config,
-                     os:getenv("RABBITMQ_ENABLED_PLUGINS_FILE")).
-
-start_other_node({Name, Port}, Config, PluginsFile) ->
-    make(" OTHER_NODE=" ++ Name ++
-         " OTHER_PORT=" ++ integer_to_list(Port) ++
-         " OTHER_CONFIG=" ++ Config ++
-         " OTHER_PLUGINS=" ++ PluginsFile ++
-         " start-other-node"),
-    timer:sleep(1000).
-
-stop_other_node({Name, _Port}) ->
-    make(" OTHER_NODE=" ++ Name ++
-         " stop-other-node"),
-    timer:sleep(1000).
-
-reset_other_node({Name, _Port}) ->
-    make(" OTHER_NODE=" ++ Name ++
-         " reset-other-node"),
-    timer:sleep(1000).
-
-cluster_other_node({Name, _Port}, {MainName, _Port2}) ->
-    make(" OTHER_NODE=" ++ Name ++
-         " MAIN_NODE=" ++ atom_to_list(n(MainName)) ++
-         " cluster-other-node"),
-    timer:sleep(1000).
-
-rabbitmqctl(Args) ->
-    execute(os:getenv("RABBITMQCTL") ++ " " ++ Args),
-    timer:sleep(100).
-
-make(Args) ->
-    Make = case os:getenv("MAKE") of
-        false -> "make";
-        M     -> M
-    end,
-    execute(Make ++ " " ++ Args).
-
-execute(Cmd) ->
-    Res = os:cmd(Cmd ++ " ; echo $?"),
-    case lists:reverse(string:tokens(Res, "\n")) of
-        ["0" | _] -> ok;
-        _         -> exit({command_failed, Cmd, Res})
-    end.
-
-plugin_dir() ->
-    {ok, [[File]]} = init:get_argument(config),
-    filename:dirname(filename:dirname(File)).
-
-n(Nodename) ->
-    {_, NodeHost} = rabbit_nodes:parts(node()),
-    rabbit_nodes:make({Nodename, NodeHost}).
similarity index 53%
rename from rabbitmq-server/deps/rabbitmq_recent_history_exchange/test/src/rabbit_exchange_type_recent_history_test.erl
rename to rabbitmq-server/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl
index 102ecfdbcb08a811108a853af47b65c9016cbde4..bf4145ee75d843e9801b323ec46408c9ab4f95f0 100644 (file)
--module(rabbit_exchange_type_recent_history_test).
+-module(system_SUITE).
 
--export([test/0]).
+-compile(export_all).
 
+-include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 -include("rabbit_recent_history.hrl").
 
--define(RABBIT, {"rabbit", 5672}).
--define(HARE,   {"hare", 5673}).
-
--import(rabbit_exchange_type_recent_history_test_util,
-        [start_other_node/1, cluster_other_node/2,
-         stop_other_node/1]).
-
-test() ->
-    ok = eunit:test(tests(?MODULE, 60), [verbose]).
-
-default_length_test() ->
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                default_length_test,
+                                length_argument_test,
+                                wrong_argument_type_test,
+                                no_store_test,
+                                e2e_test,
+                                multinode_test
+                               ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    inets:start(),
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE},
+        {rmq_nodes_count,     2}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    TestCaseName = rabbit_ct_helpers:config_to_testcase_name(Config, Testcase),
+    rabbit_ct_helpers:set_config(Config, {test_resource_name,
+                                          re:replace(TestCaseName, "/", "-", [global, {return, list}])}).
+
+end_per_testcase(_Testcase, Config) ->
+    Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+default_length_test(Config) ->
     Qs = qs(),
-    test0(fun () ->
-                  #'basic.publish'{exchange = <<"e">>}
+    test0(Config, fun () ->
+                  #'basic.publish'{exchange = make_exchange_name(Config, "0")}
           end,
           fun() ->
                   #amqp_msg{props = #'P_basic'{}, payload = <<>>}
           end, [], Qs, 100, length(Qs) * ?KEEP_NB).
 
-length_argument_test() ->
+length_argument_test(Config) ->
     Qs = qs(),
-    test0(fun () ->
-                  #'basic.publish'{exchange = <<"e">>}
+    test0(Config, fun () ->
+                  #'basic.publish'{exchange = make_exchange_name(Config, "0")}
           end,
           fun() ->
                   #amqp_msg{props = #'P_basic'{}, payload = <<>>}
           end, [{<<"x-recent-history-length">>, long, 30}], Qs, 100, length(Qs) * 30).
 
-wrong_argument_type_test() ->
-    wrong_argument_type_test0(-30),
-    wrong_argument_type_test0(0).
+wrong_argument_type_test(Config) ->
+    wrong_argument_type_test0(Config, -30),
+    wrong_argument_type_test0(Config, 0).
 
 
-no_store_test() ->
+no_store_test(Config) ->
     Qs = qs(),
-    test0(fun () ->
-                  #'basic.publish'{exchange = <<"e">>}
+    test0(Config, fun () ->
+                  #'basic.publish'{exchange = make_exchange_name(Config, "0")}
           end,
           fun() ->
                   H = [{<<"x-recent-history-no-store">>, bool, true}],
                   #amqp_msg{props = #'P_basic'{headers = H}, payload = <<>>}
           end, [], Qs, 100, 0).
 
-e2e_test() ->
+e2e_test(Config) ->
     MsgCount = 10,
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
+
+    {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
 
     #'exchange.declare_ok'{} =
         amqp_channel:call(Chan,
                           #'exchange.declare' {
-                            exchange = <<"e1">>,
+                            exchange = make_exchange_name(Config, "1"),
                             type = <<"x-recent-history">>,
                             auto_delete = true
                            }),
@@ -65,7 +111,7 @@ e2e_test() ->
     #'exchange.declare_ok'{} =
         amqp_channel:call(Chan,
                           #'exchange.declare' {
-                            exchange = <<"e2">>,
+                            exchange = make_exchange_name(Config, "2"),
                             type = <<"direct">>,
                             auto_delete = true
                            }),
@@ -78,21 +124,21 @@ e2e_test() ->
     #'queue.bind_ok'{} =
         amqp_channel:call(Chan, #'queue.bind' {
                                    queue = Q,
-                                   exchange = <<"e2">>,
+                                   exchange = make_exchange_name(Config, "2"),
                                    routing_key = <<"">>
                                   }),
 
     #'tx.select_ok'{} = amqp_channel:call(Chan, #'tx.select'{}),
     [amqp_channel:call(Chan,
-                       #'basic.publish'{exchange = <<"e1">>},
+                       #'basic.publish'{exchange = make_exchange_name(Config, "1")},
                        #amqp_msg{props = #'P_basic'{}, payload = <<>>}) ||
         _ <- lists:duplicate(MsgCount, const)],
     amqp_channel:call(Chan, #'tx.commit'{}),
 
     amqp_channel:call(Chan,
                       #'exchange.bind' {
-                         source      = <<"e1">>,
-                         destination = <<"e2">>,
+                         source      = make_exchange_name(Config, "1"),
+                         destination = make_exchange_name(Config, "2"),
                          routing_key = <<"">>
                         }),
 
@@ -104,24 +150,20 @@ e2e_test() ->
 
     ?assertEqual(MsgCount, Count),
 
-    amqp_channel:call(Chan, #'exchange.delete' { exchange = <<"e1">> }),
-    amqp_channel:call(Chan, #'exchange.delete' { exchange = <<"e2">> }),
+    amqp_channel:call(Chan, #'exchange.delete' { exchange = make_exchange_name(Config, "1") }),
+    amqp_channel:call(Chan, #'exchange.delete' { exchange = make_exchange_name(Config, "2") }),
     amqp_channel:call(Chan, #'queue.delete' { queue = Q }),
-    amqp_channel:close(Chan),
-    amqp_connection:close(Conn),
-    ok.
 
-multinode_test() ->
-    start_other_node(?HARE),
-    cluster_other_node(?HARE, ?RABBIT),
+    rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan),
+    ok.
 
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{port=5673}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
+multinode_test(Config) ->
+    {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 1),
 
     #'exchange.declare_ok'{} =
         amqp_channel:call(Chan,
                           #'exchange.declare' {
-                            exchange = <<"e1">>,
+                            exchange = make_exchange_name(Config, "1"),
                             type = <<"x-recent-history">>,
                             auto_delete = false
                            }),
@@ -134,17 +176,16 @@ multinode_test() ->
     #'queue.bind_ok'{} =
         amqp_channel:call(Chan, #'queue.bind' {
                                    queue = Q,
-                                   exchange = <<"e1">>,
+                                   exchange = make_exchange_name(Config, "1"),
                                    routing_key = <<"">>
                                   }),
 
     amqp_channel:call(Chan, #'queue.delete' { queue = Q }),
-    amqp_channel:close(Chan),
-    amqp_connection:close(Conn),
-    stop_other_node(?HARE),
+    rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan),
+
+    rabbit_ct_broker_helpers:restart_broker(Config, 1),
 
-    {ok, Conn2} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan2} = amqp_connection:open_channel(Conn2),
+    {Conn2, Chan2} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
 
     #'queue.declare_ok'{queue = Q2} =
         amqp_channel:call(Chan2, #'queue.declare' {
@@ -154,23 +195,22 @@ multinode_test() ->
     #'queue.bind_ok'{} =
         amqp_channel:call(Chan2, #'queue.bind' {
                                    queue = Q2,
-                                   exchange = <<"e1">>,
+                                   exchange = make_exchange_name(Config, "1"),
                                    routing_key = <<"">>
                                   }),
 
-    amqp_channel:call(Chan2, #'exchange.delete' { exchange = <<"e2">> }),
+    amqp_channel:call(Chan2, #'exchange.delete' { exchange = make_exchange_name(Config, "2") }),
     amqp_channel:call(Chan2, #'queue.delete' { queue = Q2 }),
-    amqp_channel:close(Chan2),
-    amqp_connection:close(Conn2),
+
+    rabbit_ct_client_helpers:close_connection_and_channel(Conn2, Chan2),
     ok.
 
-test0(MakeMethod, MakeMsg, DeclareArgs, Queues, MsgCount, ExpectedCount) ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
+test0(Config, MakeMethod, MakeMsg, DeclareArgs, Queues, MsgCount, ExpectedCount) ->
+    Chan = rabbit_ct_client_helpers:open_channel(Config),
     #'exchange.declare_ok'{} =
         amqp_channel:call(Chan,
                           #'exchange.declare' {
-                            exchange = <<"e">>,
+                            exchange = make_exchange_name(Config, "0"),
                             type = <<"x-recent-history">>,
                             auto_delete = true,
                             arguments = DeclareArgs
@@ -188,7 +228,7 @@ test0(MakeMethod, MakeMsg, DeclareArgs, Queues, MsgCount, ExpectedCount) ->
 
     [#'queue.bind_ok'{} =
          amqp_channel:call(Chan, #'queue.bind' { queue = Q,
-                                                 exchange = <<"e">>,
+                                                 exchange = make_exchange_name(Config, "0"),
                                                  routing_key = <<"">>})
      || Q <- Queues],
 
@@ -200,23 +240,22 @@ test0(MakeMethod, MakeMsg, DeclareArgs, Queues, MsgCount, ExpectedCount) ->
              M
          end || Q <- Queues],
 
-
     ?assertEqual(ExpectedCount, lists:sum(Counts)),
 
-    amqp_channel:call(Chan, #'exchange.delete' { exchange = <<"e">> }),
+    amqp_channel:call(Chan, #'exchange.delete' { exchange = make_exchange_name(Config, "0") }),
     [amqp_channel:call(Chan, #'queue.delete' { queue = Q }) || Q <- Queues],
-    amqp_channel:close(Chan),
-    amqp_connection:close(Conn),
+    rabbit_ct_client_helpers:close_channel(Chan),
+
     ok.
 
-wrong_argument_type_test0(Length) ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
+wrong_argument_type_test0(Config, Length) ->
+    Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config),
+    Chan = amqp_connection:open_channel(Conn),
     DeclareArgs = [{<<"x-recent-history-length">>, long, Length}],
     process_flag(trap_exit, true),
     ?assertExit(_, amqp_channel:call(Chan,
                           #'exchange.declare' {
-                            exchange = <<"e">>,
+                            exchange = make_exchange_name(Config, "0"),
                             type = <<"x-recent-history">>,
                             auto_delete = true,
                             arguments = DeclareArgs
@@ -227,8 +266,6 @@ wrong_argument_type_test0(Length) ->
 qs() ->
     [<<"q0">>, <<"q1">>, <<"q2">>, <<"q3">>].
 
-tests(Module, Timeout) ->
-    {foreach, fun() -> ok end,
-     [{timeout, Timeout, fun () -> Module:F() end} ||
-         {F, _Arity} <- proplists:get_value(exports, Module:module_info()),
-         string:right(atom_to_list(F), 5) =:= "_test"]}.
+make_exchange_name(Config, Suffix) ->
+    B = rabbit_ct_helpers:get_config(Config, test_resource_name),
+    erlang:list_to_binary("x-" ++ B ++ "-" ++ Suffix).
diff --git a/rabbitmq-server/deps/rabbitmq_sharding/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_sharding/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/rabbitmq-server/deps/rabbitmq_sharding/CONTRIBUTING.md b/rabbitmq-server/deps/rabbitmq_sharding/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..45bbcbe
--- /dev/null
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests is the primary place of discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
index 99ef9eec260ef00455b4d9199682c0f7d9ef93e6..d833469013f48a56f1fdab3447db71a629540d37 100644 (file)
@@ -1,6 +1,6 @@
 PROJECT = rabbitmq_sharding
 
-TEST_DEPS = amqp_client
+TEST_DEPS = rabbit amqp_client rabbitmq_consistent_hash_exchange
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -12,12 +12,3 @@ ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
 include erlang.mk
-
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
-
-WITH_BROKER_TEST_MAKEVARS := \
-       RABBITMQ_CONFIG_FILE=$(CURDIR)/etc/rabbit-test
-WITH_BROKER_TEST_COMMANDS := \
-       rabbit_sharding_test_all:all_tests()
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
diff --git a/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_hash_exchange_SUITE.erl b/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_hash_exchange_SUITE.erl
new file mode 100644 (file)
index 0000000..82ee011
--- /dev/null
@@ -0,0 +1,157 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Sharding Plugin
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_hash_exchange_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                routed_to_zero_queue_test,
+                                routed_to_one_queue_test,
+                                routed_to_many_queue_test
+                               ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    TestCaseName = rabbit_ct_helpers:config_to_testcase_name(Config, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, {test_resource_name,
+                                                    re:replace(TestCaseName, "/", "-", [global, {return, list}])}),
+    rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Test cases
+%% -------------------------------------------------------------------
+
+routed_to_zero_queue_test(Config) ->
+    test0(Config, fun () ->
+                  #'basic.publish'{exchange = make_exchange_name(Config, "0"), routing_key = rnd()}
+          end,
+          fun() ->
+                  #amqp_msg{props = #'P_basic'{}, payload = <<>>}
+          end, [], 5, 0),
+
+    passed.
+
+routed_to_one_queue_test(Config) ->
+    test0(Config, fun () ->
+                  #'basic.publish'{exchange = make_exchange_name(Config, "0"), routing_key = rnd()}
+          end,
+          fun() ->
+                  #amqp_msg{props = #'P_basic'{}, payload = <<>>}
+          end, [<<"q1">>, <<"q2">>, <<"q3">>], 1, 1),
+
+    passed.
+
+routed_to_many_queue_test(Config) ->
+    test0(Config, fun () ->
+                  #'basic.publish'{exchange = make_exchange_name(Config, "0"), routing_key = rnd()}
+          end,
+          fun() ->
+                  #amqp_msg{props = #'P_basic'{}, payload = <<>>}
+          end, [<<"q1">>, <<"q2">>, <<"q3">>], 5, 5),
+
+    passed.
+
+test0(Config, MakeMethod, MakeMsg, Queues, MsgCount, Count) ->
+    {Conn, Chan} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    E = make_exchange_name(Config, "0"),
+
+    #'exchange.declare_ok'{} =
+        amqp_channel:call(Chan,
+                          #'exchange.declare' {
+                            exchange = E,
+                            type = <<"x-modulus-hash">>,
+                            auto_delete = true
+                           }),
+    [#'queue.declare_ok'{} =
+         amqp_channel:call(Chan, #'queue.declare' {
+                             queue = Q, exclusive = true }) || Q <- Queues],
+    [#'queue.bind_ok'{} =
+         amqp_channel:call(Chan, #'queue.bind'{queue = Q,
+                                               exchange = E,
+                                               routing_key = <<"">>})
+     || Q <- Queues],
+
+    amqp_channel:call(Chan, #'confirm.select'{}),
+
+    [amqp_channel:call(Chan,
+                       MakeMethod(),
+                       MakeMsg()) || _ <- lists:duplicate(MsgCount, const)],
+
+    % ensure that the messages have been delivered to the queues before asking
+    % for the message count
+    amqp_channel:wait_for_confirms_or_die(Chan),
+
+    Counts =
+        [begin
+             #'queue.declare_ok'{message_count = M} =
+                 amqp_channel:call(Chan, #'queue.declare' {queue     = Q,
+                                                           exclusive = true }),
+             M
+         end || Q <- Queues],
+
+    ?assertEqual(Count, lists:sum(Counts)),
+
+    amqp_channel:call(Chan, #'exchange.delete' { exchange = E }),
+    [amqp_channel:call(Chan, #'queue.delete' { queue = Q }) || Q <- Queues],
+
+    rabbit_ct_client_helpers:close_connection_and_channel(Conn, Chan),
+    ok.
+
+rnd() ->
+    list_to_binary(integer_to_list(rand_compat:uniform(1000000))).
+
+make_exchange_name(Config, Suffix) ->
+    B = rabbit_ct_helpers:get_config(Config, test_resource_name),
+    erlang:list_to_binary("x-" ++ B ++ "-" ++ Suffix).
diff --git a/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_hash_exchange_test.erl b/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_hash_exchange_test.erl
deleted file mode 100644 (file)
index 1237a87..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ Sharding Plugin
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_hash_exchange_test).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("amqp_client/include/amqp_client.hrl").
-
-routed_to_zero_queue_test() ->
-    test0(fun () ->
-                  #'basic.publish'{exchange = <<"e">>, routing_key = rnd()}
-          end,
-          fun() ->
-                  #amqp_msg{props = #'P_basic'{}, payload = <<>>}
-          end, [], 5, 0).
-
-routed_to_one_queue_test() ->
-    test0(fun () ->
-                  #'basic.publish'{exchange = <<"e">>, routing_key = rnd()}
-          end,
-          fun() ->
-                  #amqp_msg{props = #'P_basic'{}, payload = <<>>}
-          end, [<<"q1">>, <<"q2">>, <<"q3">>], 1, 1).
-
-routed_to_many_queue_test() ->
-    test0(fun () ->
-                  #'basic.publish'{exchange = <<"e">>, routing_key = rnd()}
-          end,
-          fun() ->
-                  #amqp_msg{props = #'P_basic'{}, payload = <<>>}
-          end, [<<"q1">>, <<"q2">>, <<"q3">>], 5, 5).
-
-test0(MakeMethod, MakeMsg, Queues, MsgCount, Count) ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
-    #'exchange.declare_ok'{} =
-        amqp_channel:call(Chan,
-                          #'exchange.declare' {
-                            exchange = <<"e">>,
-                            type = <<"x-modulus-hash">>,
-                            auto_delete = true
-                           }),
-    [#'queue.declare_ok'{} =
-         amqp_channel:call(Chan, #'queue.declare' {
-                             queue = Q, exclusive = true }) || Q <- Queues],
-    [#'queue.bind_ok'{} =
-         amqp_channel:call(Chan, #'queue.bind' { queue = Q,
-                                                 exchange = <<"e">>,
-                                                 routing_key = <<"">> })
-     || Q <- Queues],
-
-    amqp_channel:call(Chan, #'confirm.select'{}),
-
-    [amqp_channel:call(Chan,
-                       MakeMethod(),
-                       MakeMsg()) || _ <- lists:duplicate(MsgCount, const)],
-
-    % ensure that the messages have been delivered to the queues before asking
-    % for the message count
-    amqp_channel:wait_for_confirms_or_die(Chan),
-
-    Counts =
-        [begin
-             #'queue.declare_ok'{message_count = M} =
-                 amqp_channel:call(Chan, #'queue.declare' {queue     = Q,
-                                                           exclusive = true }),
-             M
-         end || Q <- Queues],
-
-    ?assertEqual(Count, lists:sum(Counts)),
-
-    amqp_channel:call(Chan, #'exchange.delete' { exchange = <<"e">> }),
-    [amqp_channel:call(Chan, #'queue.delete' { queue = Q }) || Q <- Queues],
-    amqp_channel:close(Chan),
-    amqp_connection:close(Conn),
-    ok.
-
-rnd() ->
-    list_to_binary(integer_to_list(random:uniform(1000000))).
diff --git a/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_SUITE.erl b/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_SUITE.erl
new file mode 100644 (file)
index 0000000..20e9b35
--- /dev/null
@@ -0,0 +1,330 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ Sharding Plugin
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_sharding_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(TEST_X, <<"sharding.test">>).
+
+-import(rabbit_sharding_util, [a2b/1, exchange_bin/1]).
+-import(rabbit_ct_broker_helpers, [set_parameter/5, clear_parameter/4,
+                                   set_policy/6, clear_policy/3]).
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                shard_empty_routing_key_test,
+                                shard_queue_creation_test,
+                                shard_queue_creation2_test,
+                                shard_update_spn_test,
+                                shard_decrease_spn_keep_queues_test,
+                                shard_update_routing_key_test,
+                                shard_basic_consume_interceptor_test,
+                                shard_auto_scale_cluster_test,
+                                queue_declare_test
+                               ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    inets:start(),
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE},
+        {rmq_nodes_count,     2}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    TestCaseName = rabbit_ct_helpers:config_to_testcase_name(Config, Testcase),
+    rabbit_ct_helpers:set_config(Config, {test_resource_name,
+                                          re:replace(TestCaseName, "/", "-", [global, {return, list}])}).
+
+end_per_testcase(_Testcase, Config) ->
+    Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+shard_empty_routing_key_test(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              amqp_channel:call(Ch, x_declare(?TEST_X)),
+              set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3)),
+              timer:sleep(1000),
+              ?assertEqual(6, length(queues(Config, 0))),
+
+              teardown(Config, Ch,
+                       [{?TEST_X, 6}],
+                       [<<"3_shard">>])
+      end).
+
+shard_queue_creation_test(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              amqp_channel:call(Ch, x_declare(?TEST_X)),
+              set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+              ?assertEqual(6, length(queues(Config, 0))),
+
+              teardown(Config, Ch,
+                       [{?TEST_X, 6}],
+                       [<<"3_shard">>])
+      end).
+
+shard_queue_creation2_test(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+              ?assertEqual(0, length(queues(Config, 0))),
+
+              amqp_channel:call(Ch, x_declare(?TEST_X)),
+
+              ?assertEqual(6, length(queues(Config, 0))),
+
+              teardown(Config, Ch,
+                       [{?TEST_X, 6}],
+                       [<<"3_shard">>])
+      end).
+
+%% SPN = Shards Per Node
+shard_update_spn_test(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              amqp_channel:call(Ch, x_declare(?TEST_X)),
+              set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+              ?assertEqual(6, length(queues(Config, 0))),
+
+              set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(5, <<"1234">>)),
+              ?assertEqual(10, length(queues(Config, 0))),
+
+              teardown(Config, Ch,
+                       [{?TEST_X, 5}],
+                       [<<"3_shard">>])
+      end).
+
+shard_decrease_spn_keep_queues_test(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              amqp_channel:call(Ch, x_declare(?TEST_X)),
+              set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(5, <<"1234">>)),
+              ?assertEqual(10, length(queues(Config, 0))),
+
+              set_policy(Config, 0, <<"3_shard">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+              ?assertEqual(10, length(queues(Config, 0))),
+
+              teardown(Config, Ch,
+                       [{?TEST_X, 5}],
+                       [<<"3_shard">>])
+      end).
+
+
+%% changes the routing key policy, therefore the queues should be
+%% unbound first and then bound with the new routing key.
+shard_update_routing_key_test(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              amqp_channel:call(Ch, x_declare(?TEST_X)),
+              set_policy(Config, 0, <<"rkey">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+              timer:sleep(1000),
+              Bs = bindings(Config, 0, ?TEST_X),
+
+              set_policy(Config, 0, <<"rkey">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"4321">>)),
+              timer:sleep(1000),
+              Bs2 = bindings(Config, 0, ?TEST_X),
+
+              ?assert(Bs =/= Bs2),
+
+              teardown(Config, Ch,
+                       [{?TEST_X, 1}],
+                       [<<"rkey">>])
+      end).
+
+%% tests that the interceptor returns queue names
+%% sorted by consumer count and then by queue index.
+shard_basic_consume_interceptor_test(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              Sh = ?TEST_X,
+              amqp_channel:call(Ch, x_declare(Sh)),
+              set_policy(Config, 0, <<"three">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+
+              start_consumer(Ch, Sh),
+              assert_consumers(Config, Sh, 0, 1),
+              assert_consumers(Config, Sh, 1, 0),
+              assert_consumers(Config, Sh, 2, 0),
+
+              start_consumer(Ch, Sh),
+              assert_consumers(Config, Sh, 0, 1),
+              assert_consumers(Config, Sh, 1, 1),
+              assert_consumers(Config, Sh, 2, 0),
+
+              start_consumer(Ch, Sh),
+              assert_consumers(Config, Sh, 0, 1),
+              assert_consumers(Config, Sh, 1, 1),
+              assert_consumers(Config, Sh, 2, 1),
+
+              start_consumer(Ch, Sh),
+              assert_consumers(Config, Sh, 0, 2),
+              assert_consumers(Config, Sh, 1, 1),
+              assert_consumers(Config, Sh, 2, 1),
+
+              teardown(Config, Ch,
+                       [{?TEST_X, 6}],
+                       [<<"three">>])
+      end).
+
+shard_auto_scale_cluster_test(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              Sh = ?TEST_X,
+              amqp_channel:call(Ch, x_declare(Sh)),
+              set_policy(Config, 0, <<"three">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+
+              ?assertEqual(6, length(queues(Config, 0))),
+              Qs = queues(Config, 0),
+
+              ?assertEqual(6, length(Qs)),
+              Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+              ?assertEqual(Nodes, lists:usort(queue_nodes(Qs))),
+
+              rabbit_ct_broker_helpers:stop_broker(Config, 1),
+
+              teardown(Config, Ch,
+                       [{?TEST_X, 6}],
+                       [<<"three">>])
+      end).
+
+queue_declare_test(Config) ->
+    with_ch(Config,
+      fun (Ch) ->
+              amqp_channel:call(Ch, x_declare(?TEST_X)),
+              set_policy(Config, 0, <<"declare">>, <<"^sharding">>, <<"exchanges">>, policy_definition(3, <<"1234">>)),
+
+              Declare = #'queue.declare'{queue = <<"sharding.test">>,
+                                         auto_delete = false,
+                                         durable = true},
+
+              #'queue.declare_ok'{queue = Q} =
+                  amqp_channel:call(Ch, Declare),
+
+              ?assertEqual(Q, shard_q(Config, 0, xr(?TEST_X), 0)),
+
+              teardown(Config, Ch,
+                       [{?TEST_X, 6}],
+                       [<<"declare">>])
+      end).
+
+start_consumer(Ch, Shard) ->
+    amqp_channel:call(Ch, #'basic.consume'{queue = Shard}).
+
+assert_consumers(Config, Shard, QInd, Count) ->
+    Q0 = qr(shard_q(Config, 0, xr(Shard), QInd)),
+    [{consumers, C0}] = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_sharding_interceptor, consumer_count, [Q0]),
+    ?assertEqual(C0, Count).
+
+queues(Config, NodeIndex) ->
+    case rabbit_ct_broker_helpers:rpc(Config, NodeIndex, rabbit_amqqueue, list, [<<"/">>]) of
+        {badrpc, _} -> [];
+        Qs          -> Qs
+    end.
+
+bindings(Config, NodeIndex, XName) ->
+    case rabbit_ct_broker_helpers:rpc(Config, NodeIndex, rabbit_binding, list_for_source, [xr(XName)]) of
+        {badrpc, _} -> [];
+        Bs          -> Bs
+    end.
+
+with_ch(Config, Fun) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Fun(Ch),
+    rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch),
+    cleanup(Config, 0),
+    ok.
+
+cleanup(Config) ->
+    cleanup(Config, 0).
+cleanup(Config, NodeIndex) ->
+    [rabbit_ct_broker_helpers:rpc(Config, NodeIndex, rabbit_amqqueue, delete, [Q, false, false])
+     || Q <- queues(Config, 0)].
+
+teardown(Config, Ch, Xs, Policies) ->
+    [begin
+         amqp_channel:call(Ch, x_delete(XName)),
+         delete_queues(Config, Ch, XName, N)
+     end || {XName, N} <- Xs],
+    [clear_policy(Config, 0, Policy) || Policy <- Policies].
+
+delete_queues(Config, Ch, Name, N) ->
+    [amqp_channel:call(Ch, q_delete(Config, Name, QInd)) || QInd <- lists:seq(0, N-1)].
+
+x_declare(Name) -> x_declare(Name, <<"x-modulus-hash">>).
+
+x_declare(Name, Type) ->
+    #'exchange.declare'{exchange = Name,
+                        type     = Type,
+                        durable  = true}.
+
+x_delete(Name) ->
+    #'exchange.delete'{exchange = Name}.
+
+q_delete(Config, Name, QInd) ->
+    #'queue.delete'{queue = shard_q(Config, 0, xr(Name), QInd)}.
+
+shard_q(Config, NodeIndex, X, N) ->
+    rabbit_sharding_util:make_queue_name(
+      exchange_bin(X), a2b(rabbit_ct_broker_helpers:get_node_config(Config, NodeIndex, nodename)), N).
+
+policy_definition(SPN) ->
+    [{<<"shards-per-node">>, SPN}].
+
+policy_definition(SPN, RK) ->
+    [{<<"shards-per-node">>, SPN}, {<<"routing-key">>, RK}].
+
+queue_nodes(Qs) ->
+    [queue_node(Q) || Q <- Qs].
+
+queue_node(#amqqueue{pid = Pid}) ->
+    node(Pid).
+
+xr(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
+qr(Name) -> rabbit_misc:r(<<"/">>, queue, Name).
diff --git a/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_test.erl b/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_test.erl
deleted file mode 100644 (file)
index 82b2cac..0000000
+++ /dev/null
@@ -1,280 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ Sharding Plugin
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_sharding_test).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("amqp_client/include/amqp_client.hrl").
-
-%% Used everywhere
--define(RABBIT, {"rabbit",  5672}).
--define(HARE,   {"hare", 5673}).
--define(TEST_X, <<"sharding.test">>).
-
--import(rabbit_sharding_test_util,
-        [set_param/3, set_pol/3, clear_pol/1,
-         start_other_node/1, cluster_other_node/2, n/1,
-         reset_other_node/1, stop_other_node/1, xr/1, qr/1]).
-
--import(rabbit_sharding_util, [a2b/1, exchange_bin/1]).
-
-shard_empty_routing_key_test() ->
-    with_ch(
-      fun (Ch) ->
-              exchange_op(Ch, x_declare(?TEST_X)),
-              set_pol("3_shard", "^sharding\\.", policy(3)),
-              ?assertEqual(3, length(queues(?RABBIT))),
-
-              teardown(Ch,
-                       [{?TEST_X, 3}],
-                       ["3_shard"])
-      end).
-
-shard_queue_creation_test() ->
-    with_ch(
-      fun (Ch) ->
-              exchange_op(Ch, x_declare(?TEST_X)),
-              set_pol("3_shard", "^sharding\\.", policy(3, "1234")),
-              ?assertEqual(3, length(queues(?RABBIT))),
-
-              teardown(Ch,
-                       [{?TEST_X, 3}],
-                       ["3_shard"])
-      end).
-
-shard_queue_creation2_test() ->
-    with_ch(
-      fun (Ch) ->
-              set_pol("3_shard", "^sharding\\.", policy(3, "1234")),
-              ?assertEqual(0, length(queues(?RABBIT))),
-
-              exchange_op(Ch, x_declare(?TEST_X)),
-
-              ?assertEqual(3, length(queues(?RABBIT))),
-
-              teardown(Ch,
-                       [{?TEST_X, 3}],
-                       ["3_shard"])
-      end).
-
-%% SPN = Shards Per Node
-shard_update_spn_test() ->
-    with_ch(
-      fun (Ch) ->
-              exchange_op(Ch, x_declare(?TEST_X)),
-              set_pol("3_shard", "^sharding\\.", policy(3, "1234")),
-              ?assertEqual(3, length(queues(?RABBIT))),
-
-              set_pol("3_shard", "^sharding\\.", policy(5, "1234")),
-              ?assertEqual(5, length(queues(?RABBIT))),
-
-              teardown(Ch,
-                       [{?TEST_X, 5}],
-                       ["3_shard"])
-      end).
-
-shard_decrease_spn_keep_queues_test() ->
-    with_ch(
-      fun (Ch) ->
-              exchange_op(Ch, x_declare(?TEST_X)),
-              set_pol("3_shard", "^sharding\\.", policy(5, "1234")),
-              ?assertEqual(5, length(queues(?RABBIT))),
-
-              set_pol("3_shard", "^sharding\\.", policy(3, "1234")),
-              ?assertEqual(5, length(queues(?RABBIT))),
-
-              teardown(Ch,
-                       [{?TEST_X, 5}],
-                       ["3_shard"])
-      end).
-
-
-%% changes the routing key policy, therefore the queues should be
-%% unbound first and then bound with the new routing key.
-shard_update_routing_key_test() ->
-    with_ch(
-      fun (Ch) ->
-              exchange_op(Ch, x_declare(?TEST_X)),
-              set_pol("rkey", "^sharding\\.", policy(3, "1234")),
-              Bs = bindings(?RABBIT, ?TEST_X),
-
-              set_pol("rkey", "^sharding\\.", policy(3, "4321")),
-              Bs2 = bindings(?RABBIT, ?TEST_X),
-
-              ?assert(Bs =/= Bs2),
-
-              teardown(Ch,
-                       [{?TEST_X, 1}],
-                       ["rkey"])
-      end).
-
-%% tests that the interceptor returns queue names
-%% sorted by consumer count and then by queue index.
-shard_basic_consume_interceptor_test() ->
-    with_ch(
-      fun (Ch) ->
-              Sh = ?TEST_X,
-              exchange_op(Ch, x_declare(Sh)),
-              set_pol("three", "^sharding\\.", policy(3, "1234")),
-
-              start_consumer(Ch, Sh),
-              assert_consumers(Sh, 0, 1),
-              assert_consumers(Sh, 1, 0),
-              assert_consumers(Sh, 2, 0),
-
-              start_consumer(Ch, Sh),
-              assert_consumers(Sh, 0, 1),
-              assert_consumers(Sh, 1, 1),
-              assert_consumers(Sh, 2, 0),
-
-              start_consumer(Ch, Sh),
-              assert_consumers(Sh, 0, 1),
-              assert_consumers(Sh, 1, 1),
-              assert_consumers(Sh, 2, 1),
-
-              start_consumer(Ch, Sh),
-              assert_consumers(Sh, 0, 2),
-              assert_consumers(Sh, 1, 1),
-              assert_consumers(Sh, 2, 1),
-
-              teardown(Ch,
-                       [{?TEST_X, 3}],
-                       ["three"])
-      end).
-
-shard_auto_scale_cluster_test() ->
-    with_ch(
-      fun (Ch) ->
-              Sh = ?TEST_X,
-              exchange_op(Ch, x_declare(Sh)),
-              set_pol("three", "^sharding\\.", policy(3, "1234")),
-
-              ?assertEqual(3, length(queues(?RABBIT))),
-
-              start_other_node(?HARE),
-              cluster_other_node(?HARE, ?RABBIT),
-
-              Qs = queues(?RABBIT),
-
-              ?assertEqual(6, length(Qs)),
-              ?assertEqual([nn(?HARE), nn(?RABBIT)], lists:usort(queue_nodes(Qs))),
-
-              reset_other_node(?HARE),
-              stop_other_node(?HARE),
-
-              teardown(Ch,
-                       [{?TEST_X, 3}],
-                       ["three"])
-      end).
-
-queue_declare_test() ->
-    with_ch(
-      fun (Ch) ->
-              exchange_op(Ch, x_declare(?TEST_X)),
-              set_pol("declare", "^sharding\\.", policy(3, "1234")),
-
-              Declare = #'queue.declare'{queue = <<"sharding.test">>,
-                                         auto_delete = false,
-                                         durable = true},
-
-              #'queue.declare_ok'{queue = Q} =
-                  amqp_channel:call(Ch, Declare),
-
-              ?assertEqual(Q, shard_q(xr(?TEST_X), 0)),
-
-              teardown(Ch,
-                       [{?TEST_X, 3}],
-                       ["declare"])
-      end).
-
-start_consumer(Ch, Shard) ->
-    amqp_channel:call(Ch, #'basic.consume'{queue = Shard}).
-
-assert_consumers(Shard, QInd, Count) ->
-    Q0 = qr(shard_q(xr(Shard), QInd)),
-    [{consumers, C0}] = rabbit_sharding_interceptor:consumer_count(Q0),
-    ?assertEqual(C0, Count).
-
-queues({Nodename, _}) ->
-    case rpc:call(n(Nodename), rabbit_amqqueue, list, [<<"/">>]) of
-        {badrpc, _} -> [];
-        Qs          -> Qs
-    end.
-
-bindings({Nodename, _}, XName) ->
-    case rpc:call(n(Nodename), rabbit_binding, list_for_source, [xr(XName)]) of
-        {badrpc, _} -> [];
-        Bs          -> Bs
-    end.
-
-with_ch(Fun) ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
-    Fun(Ch),
-    amqp_connection:close(Conn),
-    cleanup(?RABBIT),
-    ok.
-
-cleanup({Nodename, _} = Rabbit) ->
-    [rpc:call(n(Nodename), rabbit_amqqueue, delete, [Q, false, false])
-     || Q <- queues(Rabbit)].
-
-teardown(Ch, Xs, Policies) ->
-    [begin
-         exchange_op(Ch, x_delete(XName)),
-         delete_queues(Ch, XName, N)
-     end || {XName, N} <- Xs],
-    [clear_pol(Policy) || Policy <- Policies].
-
-delete_queues(Ch, Name, N) ->
-    [amqp_channel:call(Ch, q_delete(Name, QInd)) || QInd <- lists:seq(0, N-1)].
-
-exchange_op(Ch, Op) ->
-    amqp_channel:call(Ch, Op).
-
-x_declare(Name) -> x_declare(Name, <<"x-modulus-hash">>).
-
-x_declare(Name, Type) ->
-    #'exchange.declare'{exchange = Name,
-                        type     = Type,
-                        durable  = true}.
-
-x_delete(Name) ->
-    #'exchange.delete'{exchange = Name}.
-
-q_delete(Name, QInd) ->
-    #'queue.delete'{queue = shard_q(xr(Name), QInd)}.
-
-shard_q(X, N) ->
-    rabbit_sharding_util:make_queue_name(
-      exchange_bin(X), a2b(node()), N).
-
-policy(SPN) ->
-    Format = "{\"shards-per-node\": ~p}",
-    lists:flatten(io_lib:format(Format, [SPN])).
-
-policy(SPN, RK) ->
-    Format = "{\"shards-per-node\": ~p, \"routing-key\": ~p}",
-    lists:flatten(io_lib:format(Format, [SPN, RK])).
-
-queue_nodes(Qs) ->
-    [queue_node(Q) || Q <- Qs].
-
-queue_node(#amqqueue{pid = Pid}) ->
-    node(Pid).
-
-nn({Nodename, _}) ->
-    n(Nodename).
diff --git a/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_test_all.erl b/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_test_all.erl
deleted file mode 100644 (file)
index 6b5409e..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
-%%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
-%%
-%%   The Original Code is RabbitMQ
-%%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2015 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(rabbit_sharding_test_all).
-
--export([all_tests/0]).
-
-all_tests() ->
-    ok = eunit:test(tests(rabbit_sharding_test, 60), [verbose]),
-    ok = eunit:test(tests(rabbit_hash_exchange_test, 60), [verbose]).
-
-tests(Module, Timeout) ->
-    {foreach, fun() -> ok end,
-     [{timeout, Timeout, fun () -> Module:F() end} ||
-         {F, _Arity} <- proplists:get_value(exports, Module:module_info()),
-         string:right(atom_to_list(F), 5) =:= "_test"]}.
diff --git a/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_test_util.erl b/rabbitmq-server/deps/rabbitmq_sharding/test/src/rabbit_sharding_test_util.erl
deleted file mode 100644 (file)
index 2995075..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ Sharding Plugin
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_sharding_test_util).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("amqp_client/include/amqp_client.hrl").
-
--compile(export_all).
-
-set_param(Component, Name, Value) ->
-    rabbitmqctl(fmt("set_parameter ~s ~s '~s'", [Component, Name, Value])).
-
-clear_param(Component, Name) ->
-    rabbitmqctl(fmt("clear_parameter ~s ~s", [Component, Name])).
-
-set_pol(Name, Pattern, Defn) ->
-    rabbitmqctl(fmt("set_policy ~s \"~s\" '~s'", [Name, Pattern, Defn])).
-
-clear_pol(Name) ->
-    rabbitmqctl(fmt("clear_policy ~s ", [Name])).
-
-fmt(Fmt, Args) ->
-    string:join(string:tokens(rabbit_misc:format(Fmt, Args), [$\n]), " ").
-
-start_other_node({Name, Port}) ->
-    start_other_node({Name, Port}, "rabbit-" ++ Name).
-
-start_other_node({Name, Port}, Config) ->
-    start_other_node({Name, Port}, Config,
-                     os:getenv("RABBITMQ_ENABLED_PLUGINS_FILE")).
-
-start_other_node({Name, Port}, Config, PluginsFile) ->
-    make("OTHER_NODE=" ++ Name ++
-         " OTHER_PORT=" ++ integer_to_list(Port) ++
-         " OTHER_CONFIG=" ++ Config ++
-         " OTHER_PLUGINS=" ++ PluginsFile ++
-         " start-other-node").
-
-stop_other_node({Name, _Port}) ->
-    make("OTHER_NODE=" ++ Name ++
-         " stop-other-node").
-
-reset_other_node({Name, _Port}) ->
-    make("OTHER_NODE=" ++ Name ++
-         " reset-other-node").
-
-cluster_other_node({Name, _Port}, {MainName, _Port2}) ->
-    make("OTHER_NODE=" ++ Name ++
-         " MAIN_NODE=" ++ atom_to_list(n(MainName)) ++
-         " cluster-other-node").
-
-make(Args) ->
-    Make = case os:getenv("MAKE") of
-        false -> "make";
-        M     -> M
-    end,
-    execute(Make ++ " " ++ Args),
-    timer:sleep(1000).
-
-rabbitmqctl(Args) ->
-    execute(os:getenv("RABBITMQCTL") ++ " " ++ Args),
-    timer:sleep(100).
-
-execute(Cmd) ->
-    Res = os:cmd(Cmd ++ " ; echo $?"),
-    case lists:reverse(string:tokens(Res, "\n")) of
-        ["0" | _] -> ok;
-        _         -> exit({command_failed, Cmd, Res})
-    end.
-
-xr(Name) -> rabbit_misc:r(<<"/">>, exchange, Name).
-qr(Name) -> rabbit_misc:r(<<"/">>, queue, Name).
-
-n(Nodename) ->
-    {_, NodeHost} = rabbit_nodes:parts(node()),
-    rabbit_nodes:make({Nodename, NodeHost}).
diff --git a/rabbitmq-server/deps/rabbitmq_shovel/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_shovel/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index 38ec4a45e6893db71d531347fb322e8ce18dcace..7728a5a381979cf8b0eaa144f89d079a9eac8282 100644 (file)
@@ -1,6 +1,7 @@
 PROJECT = rabbitmq_shovel
 
 DEPS = amqp_client
+TEST_DEPS += rabbit
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -11,6 +12,8 @@ ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
 ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
-include erlang.mk
 
-WITH_BROKER_TEST_COMMANDS := rabbit_shovel_test_all:all_tests()
+# FIXME: Remove rabbitmq_test as TEST_DEPS from here for now.
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
+
+include erlang.mk
index 615c5a8f0f4a77f1090f4365405078aacde54889..f2eb61ac02c31d5819ad0ae8cb27042466580585 100644 (file)
@@ -19,4 +19,4 @@ See [RabbitMQ shovel plugin](http://www.rabbitmq.com/shovel.html) on rabbitmq.co
 
 Released under [the same license as RabbitMQ](https://www.rabbitmq.com/mpl.html).
 
-2007-2015 (c) Pivotal Software Inc.
+2007-2016 (c) Pivotal Software Inc.
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 621a5cb8bbe1fc2c81f39f2244e25bbc5dea8432..0f6ee877b17a587b2c6b06a5774000365e178a38 100644 (file)
@@ -44,7 +44,7 @@ start_child(Name, Def) ->
     case mirrored_supervisor:start_child(
            ?SUPERVISOR,
            {Name, {rabbit_shovel_dyn_worker_sup, start_link, [Name, Def]},
-            transient, ?MAX_WAIT, worker, [rabbit_shovel_dyn_worker_sup]}) of
+            transient, ?WORKER_WAIT, worker, [rabbit_shovel_dyn_worker_sup]}) of
         {ok,                      _Pid}  -> ok;
         {error, {already_started, _Pid}} -> ok
     end.
index 4243fdbd6709cd2cc52baec946ebea364cbce2ef..d41c84366fb7f28c964eeebfd24f0c8af93cd21a 100644 (file)
@@ -54,9 +54,6 @@ handle_call(_Msg, _From, State) ->
     {noreply, State}.
 
 handle_cast(init, State = #state{config = Config}) ->
-    random:seed(erlang:phash2([node()]),
-                time_compat:monotonic_time(),
-                time_compat:unique_integer()),
     #shovel{sources = Sources, destinations = Destinations} = Config,
     {InboundConn, InboundChan, InboundURI} =
         make_conn_and_chan(Sources#endpoint.uris),
@@ -231,7 +228,7 @@ publish(Tag, Method, Msg,
       end).
 
 make_conn_and_chan(URIs) ->
-    URI = lists:nth(random:uniform(length(URIs)), URIs),
+    URI = lists:nth(rand_compat:uniform(length(URIs)), URIs),
     {ok, AmqpParam} = amqp_uri:parse(URI),
     {ok, Conn} = amqp_connection:start(AmqpParam),
     link(Conn),
index 8e27738033347280f91cea4818c8b7d7c59c62e2..00975f3c97b7d65380b2b1903af5eeda63c77e6c 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_shovel,
  [{description, "Data Shovel for RabbitMQ"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {env, [{defaults, [{prefetch_count,     1000},
similarity index 71%
rename from rabbitmq-server/deps/rabbitmq_shovel/test/src/rabbit_shovel_test.erl
rename to rabbitmq-server/deps/rabbitmq_shovel/test/configuration_SUITE.erl
index 892b80ddeaa556336f87d8ef6029be0c7173f4c7..966c3f89b96e685d779c2b16571ba38784bb3ab2 100644 (file)
@@ -1,23 +1,25 @@
-%%  The contents of this file are subject to the Mozilla Public License
-%%  Version 1.1 (the "License"); you may not use this file except in
-%%  compliance with the License. You may obtain a copy of the License
-%%  at http://www.mozilla.org/MPL/
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
 %%
-%%  Software distributed under the License is distributed on an "AS IS"
-%%  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%%  the License for the specific language governing rights and
-%%  limitations under the License.
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
 %%
-%%  The Original Code is RabbitMQ.
+%%   The Original Code is RabbitMQ
 %%
-%%  The Initial Developer of the Original Code is GoPivotal, Inc.
-%%  Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2010-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_shovel_test).
--export([test/0]).
+-module(configuration_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
--include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
 
 -define(EXCHANGE,    <<"test_exchange">>).
 -define(TO_SHOVEL,   <<"to_the_shovel">>).
 -define(SHOVELLED,   <<"shovelled">>).
 -define(TIMEOUT,     1000).
 
-main_test() ->
-    %% it may already be running. Stop if possible
-    application:stop(rabbitmq_shovel),
-
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+          zero_shovels,
+          invalid_configuration,
+          valid_configuration
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE}
+      ]),
+    Config2 = rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()),
+    ok = rabbit_ct_broker_helpers:rpc(Config2, 0,
+      application, stop, [rabbitmq_shovel]),
+    Config2.
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+zero_shovels(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, zero_shovels1, [Config]).
+
+zero_shovels1(_Config) ->
     %% shovel can be started with zero shovels configured
     ok = application:start(rabbitmq_shovel),
     ok = application:stop(rabbitmq_shovel),
+    passed.
 
+invalid_configuration(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, invalid_configuration1, [Config]).
+
+invalid_configuration1(_Config) ->
     %% various ways of breaking the config
     require_list_of_shovel_configurations =
         test_broken_shovel_configs(invalid_config),
@@ -133,37 +193,29 @@ main_test() ->
       {require_boolean, '42'}}, _} =
         test_broken_shovel_sources([{broker, "amqps://username:password@host:5673/vhost?cacertfile=/path/to/cacert.pem&certfile=/path/to/certfile.pem&keyfile=/path/to/keyfile.pem&verify=verify_peer&fail_if_no_peer_cert=42"}]),
 
-    %% a working config
-    application:set_env(
-      rabbitmq_shovel,
-      shovels,
-      [{test_shovel,
-        [{sources,
-          [{broker, "amqp:///%2f?heartbeat=5"},
-           {declarations,
-            [{'queue.declare',    [exclusive, auto_delete]},
-             {'exchange.declare', [{exchange, ?EXCHANGE}, auto_delete]},
-             {'queue.bind',       [{queue, <<>>}, {exchange, ?EXCHANGE},
-                                   {routing_key, ?TO_SHOVEL}]}
-            ]}]},
-         {destinations,
-          [{broker, "amqp:///%2f"}]},
-         {queue, <<>>},
-         {ack_mode, on_confirm},
-         {publish_fields, [{exchange, ?EXCHANGE}, {routing_key, ?FROM_SHOVEL}]},
-         {publish_properties, [{delivery_mode, 2},
-                               {cluster_id,    <<"my-cluster">>},
-                               {content_type,  ?SHOVELLED}]},
-         {add_forward_headers, true}
-        ]}],
-      infinity),
+    passed.
 
-    ok = application:start(rabbitmq_shovel),
+test_broken_shovel_configs(Configs) ->
+    application:set_env(rabbitmq_shovel, shovels, Configs),
+    {error, {Error, _}} = application:start(rabbitmq_shovel),
+    Error.
 
-    await_running_shovel(test_shovel),
+test_broken_shovel_config(Config) ->
+    {invalid_shovel_configuration, test_shovel, Error} =
+        test_broken_shovel_configs([{test_shovel, Config}]),
+    Error.
 
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Chan} = amqp_connection:open_channel(Conn),
+test_broken_shovel_sources(Sources) ->
+    {invalid_parameter_value, sources, Error} =
+        test_broken_shovel_config([{sources, Sources},
+                                   {destinations, [{broker, "amqp://"}]},
+                                   {queue, <<"">>}]),
+    Error.
+
+valid_configuration(Config) ->
+    ok = setup_shovels(Config),
+
+    Chan = rabbit_ct_client_helpers:open_channel(Config, 0),
 
     #'queue.declare_ok'{ queue = Q } =
         amqp_channel:call(Chan, #'queue.declare' { exclusive = true }),
@@ -206,7 +258,8 @@ main_test() ->
     end,
 
     [{test_shovel, static, {running, _Info}, _Time}] =
-        rabbit_shovel_status:status(),
+        rabbit_ct_broker_helpers:rpc(Config, 0,
+          rabbit_shovel_status, status, []),
 
     receive
         {#'basic.deliver' { consumer_tag = CTag, delivery_tag = AckTag1,
@@ -219,31 +272,50 @@ main_test() ->
     after ?TIMEOUT -> throw(timeout_waiting_for_deliver2)
     end,
 
-    amqp_channel:close(Chan),
-    amqp_connection:close(Conn),
-
-    ok.
+    rabbit_ct_client_helpers:close_channel(Chan).
 
-test_broken_shovel_configs(Configs) ->
-    application:set_env(rabbitmq_shovel, shovels, Configs),
-    {error, {Error, _}} = application:start(rabbitmq_shovel),
-    Error.
+setup_shovels(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, setup_shovels1, [Config]).
 
-test_broken_shovel_config(Config) ->
-    {invalid_shovel_configuration, test_shovel, Error} =
-        test_broken_shovel_configs([{test_shovel, Config}]),
-    Error.
+setup_shovels1(Config) ->
+    Hostname = ?config(rmq_hostname, Config),
+    TcpPort = rabbit_ct_broker_helpers:get_node_config(Config, 0,
+      tcp_port_amqp),
+    %% a working config
+    application:set_env(
+      rabbitmq_shovel,
+      shovels,
+      [{test_shovel,
+        [{sources,
+          [{broker, rabbit_misc:format("amqp://~s:~b/%2f?heartbeat=5",
+                                       [Hostname, TcpPort])},
+           {declarations,
+            [{'queue.declare',    [exclusive, auto_delete]},
+             {'exchange.declare', [{exchange, ?EXCHANGE}, auto_delete]},
+             {'queue.bind',       [{queue, <<>>}, {exchange, ?EXCHANGE},
+                                   {routing_key, ?TO_SHOVEL}]}
+            ]}]},
+         {destinations,
+          [{broker, rabbit_misc:format("amqp://~s:~b/%2f",
+                                       [Hostname, TcpPort])}]},
+         {queue, <<>>},
+         {ack_mode, on_confirm},
+         {publish_fields, [{exchange, ?EXCHANGE}, {routing_key, ?FROM_SHOVEL}]},
+         {publish_properties, [{delivery_mode, 2},
+                               {cluster_id,    <<"my-cluster">>},
+                               {content_type,  ?SHOVELLED}]},
+         {add_forward_headers, true}
+        ]}],
+      infinity),
 
-test_broken_shovel_sources(Sources) ->
-    {invalid_parameter_value, sources, Error} =
-        test_broken_shovel_config([{sources, Sources},
-                                   {destinations, [{broker, "amqp://"}]},
-                                   {queue, <<"">>}]),
-    Error.
+    ok = application:start(rabbitmq_shovel),
+    await_running_shovel(test_shovel).
 
 await_running_shovel(Name) ->
-    case [Name || {Name, _, {running, _}, _}
-                      <- rabbit_shovel_status:status()] of
+    case [N || {N, _, {running, _}, _}
+                      <- rabbit_shovel_status:status(),
+                         N =:= Name] of
         [_] -> ok;
         _   -> timer:sleep(100),
                await_running_shovel(Name)
similarity index 51%
rename from rabbitmq-server/deps/rabbitmq_shovel/test/src/rabbit_shovel_test_dyn.erl
rename to rabbitmq-server/deps/rabbitmq_shovel/test/dynamic_SUITE.erl
index b6571e70784a2c7642d8b7b84f7a8ccad8ac4587..0a902844753d5016bed6e649227671c7094ccc96 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_shovel_test_dyn).
+-module(dynamic_SUITE).
 
--include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 
--import(rabbit_misc, [pget/2]).
-
-simple_test() ->
-    with_ch(
+-compile(export_all).
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+          simple,
+          set_properties,
+          exchange,
+          restart,
+          change_definition,
+          autodelete,
+          validation,
+          security_validation
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE}
+      ]),
+    Config2 = rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()),
+    Config2.
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+simple(Config) ->
+    with_ch(Config,
       fun (Ch) ->
-              set_param(<<"test">>, [{<<"src-queue">>,  <<"src">>},
+              set_param(Config,
+                        <<"test">>, [{<<"src-queue">>,  <<"src">>},
                                      {<<"dest-queue">>, <<"dest">>}]),
               publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>)
       end).
 
-set_properties_test() ->
-    with_ch(
+set_properties(Config) ->
+    with_ch(Config,
       fun (Ch) ->
               Ps = [{<<"src-queue">>,      <<"src">>},
                     {<<"dest-queue">>,     <<"dest">>},
                     {<<"publish-properties">>, [{<<"cluster_id">>, <<"x">>}]}],
-              set_param(<<"test">>, Ps),
+              set_param(Config, <<"test">>, Ps),
               #amqp_msg{props = #'P_basic'{cluster_id = Cluster}} =
                   publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hi">>),
-              ?assertEqual(<<"x">>, Cluster)
+              <<"x">> = Cluster
       end).
 
-exchange_test() ->
-    with_ch(
+exchange(Config) ->
+    with_ch(Config,
       fun (Ch) ->
               amqp_channel:call(Ch, #'queue.declare'{queue   = <<"queue">>,
                                                      durable = true}),
@@ -50,12 +105,14 @@ exchange_test() ->
                 Ch, #'queue.bind'{queue       = <<"queue">>,
                                   exchange    = <<"amq.topic">>,
                                   routing_key = <<"test-key">>}),
-              set_param(<<"test">>, [{<<"src-exchange">>,    <<"amq.direct">>},
+              set_param(Config,
+                        <<"test">>, [{<<"src-exchange">>,    <<"amq.direct">>},
                                      {<<"src-exchange-key">>,<<"test-key">>},
                                      {<<"dest-exchange">>,   <<"amq.topic">>}]),
               publish_expect(Ch, <<"amq.direct">>, <<"test-key">>,
                              <<"queue">>, <<"hello">>),
-              set_param(<<"test">>, [{<<"src-exchange">>,     <<"amq.direct">>},
+              set_param(Config,
+                        <<"test">>, [{<<"src-exchange">>,     <<"amq.direct">>},
                                      {<<"src-exchange-key">>, <<"test-key">>},
                                      {<<"dest-exchange">>,    <<"amq.topic">>},
                                      {<<"dest-exchange-key">>,<<"new-key">>}]),
@@ -69,118 +126,151 @@ exchange_test() ->
                              <<"queue">>, <<"hello">>)
       end).
 
-restart_test() ->
-    with_ch(
+restart(Config) ->
+    with_ch(Config,
       fun (Ch) ->
-              set_param(<<"test">>, [{<<"src-queue">>,  <<"src">>},
+              set_param(Config,
+                        <<"test">>, [{<<"src-queue">>,  <<"src">>},
                                      {<<"dest-queue">>, <<"dest">>}]),
               %% The catch is because connections link to the shovel,
               %% so one connection will die, kill the shovel, kill
               %% the other connection, then we can't close it
-              [catch amqp_connection:close(C) || C <- rabbit_direct:list()],
+              Conns = rabbit_ct_broker_helpers:rpc(Config, 0,
+                rabbit_direct, list, []),
+              [catch amqp_connection:close(C) || C <- Conns],
               publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>)
       end).
 
-change_definition_test() ->
-    with_ch(
+change_definition(Config) ->
+    with_ch(Config,
       fun (Ch) ->
-              set_param(<<"test">>, [{<<"src-queue">>,  <<"src">>},
+              set_param(Config,
+                        <<"test">>, [{<<"src-queue">>,  <<"src">>},
                                      {<<"dest-queue">>, <<"dest">>}]),
               publish_expect(Ch, <<>>, <<"src">>, <<"dest">>, <<"hello">>),
-              set_param(<<"test">>, [{<<"src-queue">>,  <<"src">>},
+              set_param(Config,
+                        <<"test">>, [{<<"src-queue">>,  <<"src">>},
                                      {<<"dest-queue">>, <<"dest2">>}]),
               publish_expect(Ch, <<>>, <<"src">>, <<"dest2">>, <<"hello">>),
               expect_empty(Ch, <<"dest">>),
-              clear_param(<<"test">>),
+              clear_param(Config, <<"test">>),
               publish_expect(Ch, <<>>, <<"src">>, <<"src">>, <<"hello">>),
               expect_empty(Ch, <<"dest">>),
               expect_empty(Ch, <<"dest2">>)
       end).
 
-autodelete_test_() ->
-    [autodelete_case({<<"on-confirm">>, <<"queue-length">>,  0, 100}),
-     autodelete_case({<<"on-confirm">>, 50,                 50,  50}),
-     autodelete_case({<<"on-publish">>, <<"queue-length">>,  0, 100}),
-     autodelete_case({<<"on-publish">>, 50,                 50,  50}),
-     %% no-ack is not compatible with explicit count
-     autodelete_case({<<"no-ack">>,     <<"queue-length">>,  0, 100})].
+autodelete(Config) ->
+    autodelete_case(Config, {<<"on-confirm">>, <<"queue-length">>,  0, 100}),
+    autodelete_case(Config, {<<"on-confirm">>, 50,                 50,  50}),
+    autodelete_case(Config, {<<"on-publish">>, <<"queue-length">>,  0, 100}),
+    autodelete_case(Config, {<<"on-publish">>, 50,                 50,  50}),
+    %% no-ack is not compatible with explicit count
+    autodelete_case(Config, {<<"no-ack">>,     <<"queue-length">>,  0, 100}).
 
-autodelete_case(Args) ->
-    fun () -> with_ch(autodelete_do(Args)) end.
+autodelete_case(Config, Args) ->
+    with_ch(Config, autodelete_do(Config, Args)).
 
-autodelete_do({AckMode, After, ExpSrc, ExpDest}) ->
+autodelete_do(Config, {AckMode, After, ExpSrc, ExpDest}) ->
     fun (Ch) ->
             amqp_channel:call(Ch, #'confirm.select'{}),
             amqp_channel:call(Ch, #'queue.declare'{queue = <<"src">>}),
             publish_count(Ch, <<>>, <<"src">>, <<"hello">>, 100),
             amqp_channel:wait_for_confirms(Ch),
-            set_param_nowait(<<"test">>, [{<<"src-queue">>,    <<"src">>},
+            set_param_nowait(Config,
+                             <<"test">>, [{<<"src-queue">>,    <<"src">>},
                                           {<<"dest-queue">>,   <<"dest">>},
                                           {<<"ack-mode">>,     AckMode},
                                           {<<"delete-after">>, After}]),
-            await_autodelete(<<"test">>),
+            await_autodelete(Config, <<"test">>),
             expect_count(Ch, <<"src">>, <<"hello">>, ExpSrc),
             expect_count(Ch, <<"dest">>, <<"hello">>, ExpDest)
     end.
 
-validation_test() ->
+validation(Config) ->
     URIs = [{<<"src-uri">>,  <<"amqp://">>},
             {<<"dest-uri">>, <<"amqp://">>}],
 
     %% Need valid src and dest URIs
-    invalid_param([]),
-    invalid_param([{<<"src-queue">>, <<"test">>},
+    invalid_param(Config, []),
+    invalid_param(Config,
+                  [{<<"src-queue">>, <<"test">>},
                    {<<"src-uri">>,   <<"derp">>},
                    {<<"dest-uri">>,  <<"amqp://">>}]),
-    invalid_param([{<<"src-queue">>, <<"test">>},
+    invalid_param(Config,
+                  [{<<"src-queue">>, <<"test">>},
                    {<<"src-uri">>,   [<<"derp">>]},
                    {<<"dest-uri">>,  <<"amqp://">>}]),
-    invalid_param([{<<"src-queue">>, <<"test">>},
+    invalid_param(Config,
+                  [{<<"src-queue">>, <<"test">>},
                    {<<"dest-uri">>,  <<"amqp://">>}]),
 
     %% Also need src exchange or queue
-    invalid_param(URIs),
-    valid_param([{<<"src-exchange">>, <<"test">>} | URIs]),
+    invalid_param(Config,
+                  URIs),
+    valid_param(Config,
+                [{<<"src-exchange">>, <<"test">>} | URIs]),
     QURIs =     [{<<"src-queue">>,    <<"test">>} | URIs],
-    valid_param(QURIs),
+    valid_param(Config, QURIs),
 
     %% But not both
-    invalid_param([{<<"src-exchange">>, <<"test">>} | QURIs]),
+    invalid_param(Config,
+                  [{<<"src-exchange">>, <<"test">>} | QURIs]),
 
     %% Check these are of right type
-    invalid_param([{<<"prefetch-count">>,  <<"three">>} | QURIs]),
-    invalid_param([{<<"reconnect-delay">>, <<"three">>} | QURIs]),
-    invalid_param([{<<"ack-mode">>,        <<"whenever">>} | QURIs]),
-    invalid_param([{<<"delete-after">>,    <<"whenever">>} | QURIs]),
+    invalid_param(Config,
+                  [{<<"prefetch-count">>,  <<"three">>} | QURIs]),
+    invalid_param(Config,
+                  [{<<"reconnect-delay">>, <<"three">>} | QURIs]),
+    invalid_param(Config,
+                  [{<<"ack-mode">>,        <<"whenever">>} | QURIs]),
+    invalid_param(Config,
+                  [{<<"delete-after">>,    <<"whenever">>} | QURIs]),
 
     %% Check properties have to look property-ish
-    invalid_param([{<<"publish-properties">>, [{<<"nonexistent">>, <<>>}]}]),
-    invalid_param([{<<"publish-properties">>, [{<<"cluster_id">>, 2}]}]),
-    invalid_param([{<<"publish-properties">>, <<"something">>}]),
+    invalid_param(Config,
+                  [{<<"publish-properties">>, [{<<"nonexistent">>, <<>>}]}]),
+    invalid_param(Config,
+                  [{<<"publish-properties">>, [{<<"cluster_id">>, 2}]}]),
+    invalid_param(Config,
+                  [{<<"publish-properties">>, <<"something">>}]),
 
     %% Can't use explicit message count and no-ack together
-    invalid_param([{<<"delete-after">>,    1},
+    invalid_param(Config,
+                  [{<<"delete-after">>,    1},
                    {<<"ack-mode">>,        <<"no-ack">>} | QURIs]),
     ok.
 
-security_validation_test() ->
-    [begin
-         rabbit_vhost:add(U),
-         rabbit_auth_backend_internal:add_user(U, <<>>),
-         rabbit_auth_backend_internal:set_permissions(
-           U, U, <<".*">>, <<".*">>, <<".*">>)
-     end || U <- [<<"a">>, <<"b">>]],
+security_validation(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, security_validation_add_user, []),
 
     Qs = [{<<"src-queue">>, <<"test">>},
           {<<"dest-queue">>, <<"test2">>}],
 
-    A = lookup_user(<<"a">>),
-    valid_param([{<<"src-uri">>,  <<"amqp:///a">>},
+    A = lookup_user(Config, <<"a">>),
+    valid_param(Config, [{<<"src-uri">>,  <<"amqp:///a">>},
                  {<<"dest-uri">>, <<"amqp:///a">>} | Qs], A),
-    invalid_param([{<<"src-uri">>,  <<"amqp:///a">>},
+    invalid_param(Config,
+                  [{<<"src-uri">>,  <<"amqp:///a">>},
                    {<<"dest-uri">>, <<"amqp:///b">>} | Qs], A),
-    invalid_param([{<<"src-uri">>,  <<"amqp:///b">>},
+    invalid_param(Config,
+                  [{<<"src-uri">>,  <<"amqp:///b">>},
                    {<<"dest-uri">>, <<"amqp:///a">>} | Qs], A),
+
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, security_validation_remove_user, []),
+    ok.
+
+security_validation_add_user() ->
+    [begin
+         rabbit_vhost:add(U),
+         rabbit_auth_backend_internal:add_user(U, <<>>),
+         rabbit_auth_backend_internal:set_permissions(
+           U, U, <<".*">>, <<".*">>, <<".*">>)
+     end || U <- [<<"a">>, <<"b">>]],
+    ok.
+
+security_validation_remove_user() ->
     [begin
          rabbit_vhost:delete(U),
          rabbit_auth_backend_internal:delete_user(U)
@@ -189,12 +279,11 @@ security_validation_test() ->
 
 %%----------------------------------------------------------------------------
 
-with_ch(Fun) ->
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
+with_ch(Config, Fun) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
     Fun(Ch),
-    amqp_connection:close(Conn),
-    cleanup(),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    cleanup(Config),
     ok.
 
 publish(Ch, X, Key, Payload) when is_binary(Payload) ->
@@ -211,8 +300,8 @@ publish_expect(Ch, X, Key, Q, Payload) ->
 expect(Ch, Q, Payload) ->
     amqp_channel:subscribe(Ch, #'basic.consume'{queue  = Q,
                                                 no_ack = true}, self()),
-    receive
-        #'basic.consume_ok'{consumer_tag = CTag} -> ok
+    CTag = receive
+        #'basic.consume_ok'{consumer_tag = CT} -> CT
     end,
     Msg = receive
               {#'basic.deliver'{}, #amqp_msg{payload = Payload} = M} ->
@@ -224,8 +313,7 @@ expect(Ch, Q, Payload) ->
     Msg.
 
 expect_empty(Ch, Q) ->
-    ?assertMatch(#'basic.get_empty'{},
-                 amqp_channel:call(Ch, #'basic.get'{ queue = Q })).
+    #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{ queue = Q }).
 
 publish_count(Ch, X, Key, M, Count) ->
     [publish(Ch, X, Key, M) || _ <- lists:seq(1, Count)].
@@ -234,46 +322,66 @@ expect_count(Ch, Q, M, Count) ->
     [expect(Ch, Q, M) || _ <- lists:seq(1, Count)],
     expect_empty(Ch, Q).
 
-set_param(Name, Value) ->
-    set_param_nowait(Name, Value),
-    await_shovel(Name).
+set_param(Config, Name, Value) ->
+    set_param_nowait(Config, Name, Value),
+    await_shovel(Config, Name).
 
-set_param_nowait(Name, Value) ->
-    ok = rabbit_runtime_parameters:set(
-           <<"/">>, <<"shovel">>, Name, [{<<"src-uri">>,  <<"amqp://">>},
-                                         {<<"dest-uri">>, [<<"amqp://">>]} |
-                                         Value], none).
+set_param_nowait(Config, Name, Value) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+      rabbit_runtime_parameters, set, [
+        <<"/">>, <<"shovel">>, Name, [{<<"src-uri">>,  <<"amqp://">>},
+                                      {<<"dest-uri">>, [<<"amqp://">>]} |
+                                      Value], none]).
+
+invalid_param(Config, Value, User) ->
+    {error_string, _} = rabbit_ct_broker_helpers:rpc(Config, 0,
+      rabbit_runtime_parameters, set,
+      [<<"/">>, <<"shovel">>, <<"invalid">>, Value, User]).
 
-invalid_param(Value, User) ->
-    {error_string, _} = rabbit_runtime_parameters:set(
-                          <<"/">>, <<"shovel">>, <<"invalid">>, Value, User).
+valid_param(Config, Value, User) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, valid_param1, [Config, Value, User]).
 
-valid_param(Value, User) ->
+valid_param1(_Config, Value, User) ->
     ok = rabbit_runtime_parameters:set(
            <<"/">>, <<"shovel">>, <<"a">>, Value, User),
     ok = rabbit_runtime_parameters:clear(<<"/">>, <<"shovel">>, <<"a">>).
 
-invalid_param(Value) -> invalid_param(Value, none).
-valid_param(Value) -> valid_param(Value, none).
+invalid_param(Config, Value) -> invalid_param(Config, Value, none).
+valid_param(Config, Value) -> valid_param(Config, Value, none).
 
-lookup_user(Name) ->
-    {ok, User} = rabbit_access_control:check_user_login(Name, []),
+lookup_user(Config, Name) ->
+    {ok, User} = rabbit_ct_broker_helpers:rpc(Config, 0,
+      rabbit_access_control, check_user_login, [Name, []]),
     User.
 
-clear_param(Name) ->
-    rabbit_runtime_parameters:clear(<<"/">>, <<"shovel">>, Name).
+clear_param(Config, Name) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0,
+      rabbit_runtime_parameters, clear, [<<"/">>, <<"shovel">>, Name]).
+
+cleanup(Config) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, cleanup1, [Config]).
 
-cleanup() ->
-    [rabbit_runtime_parameters:clear(pget(vhost, P),
-                                     pget(component, P),
-                                     pget(name, P)) ||
+cleanup1(_Config) ->
+    [rabbit_runtime_parameters:clear(rabbit_misc:pget(vhost, P),
+                                     rabbit_misc:pget(component, P),
+                                     rabbit_misc:pget(name, P)) ||
         P <- rabbit_runtime_parameters:list()],
     [rabbit_amqqueue:delete(Q, false, false) || Q <- rabbit_amqqueue:list()].
 
-await_shovel(Name) ->
+await_shovel(Config, Name) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, await_shovel1, [Config, Name]).
+
+await_shovel1(_Config, Name) ->
     await(fun () -> lists:member(Name, shovels_from_status()) end).
 
-await_autodelete(Name) ->
+await_autodelete(Config, Name) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, await_autodelete1, [Config, Name]).
+
+await_autodelete1(_Config, Name) ->
     await(fun () -> not lists:member(Name, shovels_from_parameters()) end),
     await(fun () -> not lists:member(Name, shovels_from_status()) end).
 
@@ -290,4 +398,4 @@ shovels_from_status() ->
 
 shovels_from_parameters() ->
     L = rabbit_runtime_parameters:list(<<"/">>, <<"shovel">>),
-    [pget(name, Shovel) || Shovel <- L].
+    [rabbit_misc:pget(name, Shovel) || Shovel <- L].
diff --git a/rabbitmq-server/deps/rabbitmq_shovel/test/src/rabbit_shovel_test_all.erl b/rabbitmq-server/deps/rabbitmq_shovel/test/src/rabbit_shovel_test_all.erl
deleted file mode 100644 (file)
index 2269ea8..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
-%%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
-%%
-%%   The Original Code is RabbitMQ
-%%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2015 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_shovel_test_all).
-
--export([all_tests/0]).
-
-all_tests() ->
-    ok = eunit:test(tests(rabbit_shovel_test, 60), [verbose]),
-    ok = eunit:test(tests(rabbit_shovel_test_dyn, 60), [verbose]).
-
-tests(Module, Timeout) ->
-    {foreach, fun() -> ok end,
-     [{timeout, Timeout, fun () -> Module:F() end} || F <- funs(Module, "_test")] ++
-         [{timeout, Timeout, Fun} || Gen <- funs(Module, "_test_"),
-                                     Fun <- Module:Gen()]}.
-
-funs(Module, Suffix) ->
-    [F || {F, _Arity} <- proplists:get_value(exports, Module:module_info()),
-          string:right(atom_to_list(F), length(Suffix)) =:= Suffix].
diff --git a/rabbitmq-server/deps/rabbitmq_shovel_management/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_shovel_management/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index cb0830b3e47a716f94e6cb79354ae6ed5f223e9b..5063da202e323ccc912dc2f0659786267d3f92e3 100644 (file)
@@ -1,6 +1,7 @@
 PROJECT = rabbitmq_shovel_management
 
 DEPS = rabbitmq_management rabbitmq_shovel webmachine
+TEST_DEPS += rabbit
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -11,8 +12,8 @@ ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
 ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
-include erlang.mk
 
-WITH_BROKER_TEST_MAKEVARS := \
-               RABBITMQ_CONFIG_FILE=$(CURDIR)/etc/rabbit-test
-WITH_BROKER_TEST_COMMANDS := rabbit_shovel_mgmt_test_all:all_tests()
+# FIXME: Remove rabbitmq_test as TEST_DEPS from here for now.
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
+
+include erlang.mk
diff --git a/rabbitmq-server/deps/rabbitmq_shovel_management/README b/rabbitmq-server/deps/rabbitmq_shovel_management/README
deleted file mode 100644 (file)
index 8d6d3cc..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-Adds information on shovel status to the management plugin. Build it
-like any other plugin.
-
-If you have a heterogenous cluster (where the nodes have different
-plugins installed), this should be installed on the same nodes as the
-management plugin.
-
-Strictly speaking the shovel does not need to be installed, but then
-it won't tell you much.
-
-The HTTP API is very simple: GET /api/shovels.
diff --git a/rabbitmq-server/deps/rabbitmq_shovel_management/README.md b/rabbitmq-server/deps/rabbitmq_shovel_management/README.md
new file mode 100644 (file)
index 0000000..7d68155
--- /dev/null
@@ -0,0 +1,34 @@
+# RabbitMQ Shovel Management Plugin
+
+Adds information on shovel status to the management plugin. Build it
+like any other plugin.
+
+If you have a heterogeneous cluster (where the nodes have different
+plugins installed), this should be installed on the same nodes as the
+management plugin.
+
+
+## Installing
+
+This plugin ships with RabbitMQ. Enable it with
+
+```
+[sudo] rabbitmq-plugins enable rabbitmq_shovel_management
+```
+
+
+## Usage
+
+When the plugin is enabled, you'll find a shovel management
+link under the Admin tab.
+
+The HTTP API is very small:
+
+ * `GET /api/shovels`
+
+
+## License and Copyright
+
+Released under [the same license as RabbitMQ](https://www.rabbitmq.com/mpl.html).
+
+2007-2016 (c) Pivotal Software Inc.
index 9f0c0c38494c4beabf27ccddfa996d51d66a91d8..f7ca7bebb76849368b9a6bf56f1bdea9e847604d 100644 (file)
@@ -16,7 +16,7 @@
 
 ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
 
-ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+ERLANG_MK_VERSION = 2.0.0-pre.2-132-g62d576b
 
 # Core configuration.
 
@@ -24,6 +24,7 @@ PROJECT ?= $(notdir $(CURDIR))
 PROJECT := $(strip $(PROJECT))
 
 PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
 
 # Verbosity.
 
@@ -84,7 +85,7 @@ all:: deps app rel
 rel::
        $(verbose) :
 
-check:: clean app tests
+check:: tests
 
 clean:: clean-crashdump
 
@@ -283,7 +284,7 @@ pkg_apns_description = Apple Push Notification Server for Erlang
 pkg_apns_homepage = http://inaka.github.com/apns4erl
 pkg_apns_fetch = git
 pkg_apns_repo = https://github.com/inaka/apns4erl
-pkg_apns_commit = 1.0.4
+pkg_apns_commit = master
 
 PACKAGES += azdht
 pkg_azdht_name = azdht
@@ -387,7 +388,7 @@ pkg_bitcask_description = because you need another a key/value storage engine
 pkg_bitcask_homepage = https://github.com/basho/bitcask
 pkg_bitcask_fetch = git
 pkg_bitcask_repo = https://github.com/basho/bitcask
-pkg_bitcask_commit = master
+pkg_bitcask_commit = develop
 
 PACKAGES += bitstore
 pkg_bitstore_name = bitstore
@@ -421,6 +422,14 @@ pkg_boss_db_fetch = git
 pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
 pkg_boss_db_commit = master
 
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
 PACKAGES += bson
 pkg_bson_name = bson
 pkg_bson_description = BSON documents in Erlang, see bsonspec.org
@@ -451,7 +460,7 @@ pkg_cake_description = Really simple terminal colorization
 pkg_cake_homepage = https://github.com/darach/cake-erl
 pkg_cake_fetch = git
 pkg_cake_repo = https://github.com/darach/cake-erl
-pkg_cake_commit = v0.1.2
+pkg_cake_commit = master
 
 PACKAGES += carotene
 pkg_carotene_name = carotene
@@ -787,7 +796,7 @@ pkg_cowboy_description = Small, fast and modular HTTP server.
 pkg_cowboy_homepage = http://ninenines.eu
 pkg_cowboy_fetch = git
 pkg_cowboy_repo = https://github.com/ninenines/cowboy
-pkg_cowboy_commit = 1.0.1
+pkg_cowboy_commit = 1.0.4
 
 PACKAGES += cowdb
 pkg_cowdb_name = cowdb
@@ -803,7 +812,7 @@ pkg_cowlib_description = Support library for manipulating Web protocols.
 pkg_cowlib_homepage = http://ninenines.eu
 pkg_cowlib_fetch = git
 pkg_cowlib_repo = https://github.com/ninenines/cowlib
-pkg_cowlib_commit = 1.0.1
+pkg_cowlib_commit = 1.0.2
 
 PACKAGES += cpg
 pkg_cpg_name = cpg
@@ -885,14 +894,6 @@ pkg_dh_date_fetch = git
 pkg_dh_date_repo = https://github.com/daleharvey/dh_date
 pkg_dh_date_commit = master
 
-PACKAGES += dhtcrawler
-pkg_dhtcrawler_name = dhtcrawler
-pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
-pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_fetch = git
-pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_commit = master
-
 PACKAGES += dirbusterl
 pkg_dirbusterl_name = dirbusterl
 pkg_dirbusterl_description = DirBuster successor in Erlang
@@ -1053,14 +1054,6 @@ pkg_efene_fetch = git
 pkg_efene_repo = https://github.com/efene/efene
 pkg_efene_commit = master
 
-PACKAGES += eganglia
-pkg_eganglia_name = eganglia
-pkg_eganglia_description = Erlang library to interact with Ganglia
-pkg_eganglia_homepage = https://github.com/inaka/eganglia
-pkg_eganglia_fetch = git
-pkg_eganglia_repo = https://github.com/inaka/eganglia
-pkg_eganglia_commit = v0.9.1
-
 PACKAGES += egeoip
 pkg_egeoip_name = egeoip
 pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
@@ -1075,7 +1068,7 @@ pkg_ehsa_description = Erlang HTTP server basic and digest authentication module
 pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
 pkg_ehsa_fetch = hg
 pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
-pkg_ehsa_commit = 2.0.4
+pkg_ehsa_commit = default
 
 PACKAGES += ej
 pkg_ej_name = ej
@@ -1139,7 +1132,7 @@ pkg_elvis_description = Erlang Style Reviewer
 pkg_elvis_homepage = https://github.com/inaka/elvis
 pkg_elvis_fetch = git
 pkg_elvis_repo = https://github.com/inaka/elvis
-pkg_elvis_commit = 0.2.4
+pkg_elvis_commit = master
 
 PACKAGES += emagick
 pkg_emagick_name = emagick
@@ -1515,7 +1508,7 @@ pkg_erwa_description = A WAMP router and client written in Erlang.
 pkg_erwa_homepage = https://github.com/bwegh/erwa
 pkg_erwa_fetch = git
 pkg_erwa_repo = https://github.com/bwegh/erwa
-pkg_erwa_commit = 0.1.1
+pkg_erwa_commit = master
 
 PACKAGES += espec
 pkg_espec_name = espec
@@ -1619,7 +1612,7 @@ pkg_exometer_description = Basic measurement objects and probe behavior
 pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
 pkg_exometer_fetch = git
 pkg_exometer_repo = https://github.com/Feuerlabs/exometer
-pkg_exometer_commit = 1.2
+pkg_exometer_commit = master
 
 PACKAGES += exs1024
 pkg_exs1024_name = exs1024
@@ -1683,7 +1676,15 @@ pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
 pkg_feeder_homepage = https://github.com/michaelnisi/feeder
 pkg_feeder_fetch = git
 pkg_feeder_repo = https://github.com/michaelnisi/feeder
-pkg_feeder_commit = v1.4.6
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
 
 PACKAGES += fix
 pkg_fix_name = fix
@@ -1781,6 +1782,14 @@ pkg_geef_fetch = git
 pkg_geef_repo = https://github.com/carlosmn/geef
 pkg_geef_commit = master
 
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
 PACKAGES += gen_cycle
 pkg_gen_cycle_name = gen_cycle
 pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
@@ -1837,6 +1846,14 @@ pkg_gen_unix_fetch = git
 pkg_gen_unix_repo = https://github.com/msantos/gen_unix
 pkg_gen_unix_commit = master
 
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
 PACKAGES += getopt
 pkg_getopt_name = getopt
 pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
@@ -1981,13 +1998,21 @@ pkg_hyper_fetch = git
 pkg_hyper_repo = https://github.com/GameAnalytics/hyper
 pkg_hyper_commit = master
 
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
 PACKAGES += ibrowse
 pkg_ibrowse_name = ibrowse
 pkg_ibrowse_description = Erlang HTTP client
 pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
 pkg_ibrowse_fetch = git
 pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
-pkg_ibrowse_commit = v4.1.1
+pkg_ibrowse_commit = master
 
 PACKAGES += ierlang
 pkg_ierlang_name = ierlang
@@ -2043,7 +2068,7 @@ pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
 pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
 pkg_jamdb_sybase_fetch = git
 pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
-pkg_jamdb_sybase_commit = 0.6.0
+pkg_jamdb_sybase_commit = master
 
 PACKAGES += jerg
 pkg_jerg_name = jerg
@@ -2056,9 +2081,9 @@ pkg_jerg_commit = master
 PACKAGES += jesse
 pkg_jesse_name = jesse
 pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
-pkg_jesse_homepage = https://github.com/klarna/jesse
+pkg_jesse_homepage = https://github.com/for-GET/jesse
 pkg_jesse_fetch = git
-pkg_jesse_repo = https://github.com/klarna/jesse
+pkg_jesse_repo = https://github.com/for-GET/jesse
 pkg_jesse_commit = master
 
 PACKAGES += jiffy
@@ -2075,7 +2100,7 @@ pkg_jiffy_v_description = JSON validation utility
 pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
 pkg_jiffy_v_fetch = git
 pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
-pkg_jiffy_v_commit = 0.3.3
+pkg_jiffy_v_commit = master
 
 PACKAGES += jobs
 pkg_jobs_name = jobs
@@ -2083,7 +2108,7 @@ pkg_jobs_description = a Job scheduler for load regulation
 pkg_jobs_homepage = https://github.com/esl/jobs
 pkg_jobs_fetch = git
 pkg_jobs_repo = https://github.com/esl/jobs
-pkg_jobs_commit = 0.3
+pkg_jobs_commit = master
 
 PACKAGES += joxa
 pkg_joxa_name = joxa
@@ -2109,6 +2134,14 @@ pkg_json_rec_fetch = git
 pkg_json_rec_repo = https://github.com/justinkirby/json_rec
 pkg_json_rec_commit = master
 
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
 PACKAGES += jsonerl
 pkg_jsonerl_name = jsonerl
 pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
@@ -2149,6 +2182,14 @@ pkg_kafka_fetch = git
 pkg_kafka_repo = https://github.com/wooga/kafka-erlang
 pkg_kafka_commit = master
 
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
 PACKAGES += kai
 pkg_kai_name = kai
 pkg_kai_description = DHT storage by Takeshi Inoue
@@ -2291,7 +2332,7 @@ pkg_lasse_description = SSE handler for Cowboy
 pkg_lasse_homepage = https://github.com/inaka/lasse
 pkg_lasse_fetch = git
 pkg_lasse_repo = https://github.com/inaka/lasse
-pkg_lasse_commit = 0.1.0
+pkg_lasse_commit = master
 
 PACKAGES += ldap
 pkg_ldap_name = ldap
@@ -2501,6 +2542,14 @@ pkg_merl_fetch = git
 pkg_merl_repo = https://github.com/richcarl/merl
 pkg_merl_commit = master
 
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
 PACKAGES += mimetypes
 pkg_mimetypes_name = mimetypes
 pkg_mimetypes_description = Erlang MIME types library
@@ -2733,21 +2782,13 @@ pkg_oauth2_fetch = git
 pkg_oauth2_repo = https://github.com/kivra/oauth2
 pkg_oauth2_commit = master
 
-PACKAGES += oauth2c
-pkg_oauth2c_name = oauth2c
-pkg_oauth2c_description = Erlang OAuth2 Client
-pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
-pkg_oauth2c_fetch = git
-pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
-pkg_oauth2c_commit = master
-
 PACKAGES += octopus
 pkg_octopus_name = octopus
 pkg_octopus_description = Small and flexible pool manager written in Erlang
 pkg_octopus_homepage = https://github.com/erlangbureau/octopus
 pkg_octopus_fetch = git
 pkg_octopus_repo = https://github.com/erlangbureau/octopus
-pkg_octopus_commit = 1.0.0
+pkg_octopus_commit = master
 
 PACKAGES += of_protocol
 pkg_of_protocol_name = of_protocol
@@ -2819,7 +2860,7 @@ pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
 pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
 pkg_pegjs_fetch = git
 pkg_pegjs_repo = https://github.com/dmitriid/pegjs
-pkg_pegjs_commit = 0.3
+pkg_pegjs_commit = master
 
 PACKAGES += percept2
 pkg_percept2_name = percept2
@@ -2987,7 +3028,7 @@ pkg_qdate_description = Date, time, and timezone parsing, formatting, and conver
 pkg_qdate_homepage = https://github.com/choptastic/qdate
 pkg_qdate_fetch = git
 pkg_qdate_repo = https://github.com/choptastic/qdate
-pkg_qdate_commit = 0.4.0
+pkg_qdate_commit = master
 
 PACKAGES += qrcode
 pkg_qrcode_name = qrcode
@@ -3059,7 +3100,7 @@ pkg_ranch_description = Socket acceptor pool for TCP protocols.
 pkg_ranch_homepage = http://ninenines.eu
 pkg_ranch_fetch = git
 pkg_ranch_repo = https://github.com/ninenines/ranch
-pkg_ranch_commit = 1.1.0
+pkg_ranch_commit = 1.2.1
 
 PACKAGES += rbeacon
 pkg_rbeacon_name = rbeacon
@@ -3099,7 +3140,7 @@ pkg_recon_description = Collection of functions and scripts to debug Erlang in p
 pkg_recon_homepage = https://github.com/ferd/recon
 pkg_recon_fetch = git
 pkg_recon_repo = https://github.com/ferd/recon
-pkg_recon_commit = 2.2.1
+pkg_recon_commit = master
 
 PACKAGES += record_info
 pkg_record_info_name = record_info
@@ -3293,6 +3334,14 @@ pkg_rlimit_fetch = git
 pkg_rlimit_repo = https://github.com/jlouis/rlimit
 pkg_rlimit_commit = master
 
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
 PACKAGES += safetyvalve
 pkg_safetyvalve_name = safetyvalve
 pkg_safetyvalve_description = A safety valve for your erlang node
@@ -3363,7 +3412,7 @@ pkg_shotgun_description = better than just a gun
 pkg_shotgun_homepage = https://github.com/inaka/shotgun
 pkg_shotgun_fetch = git
 pkg_shotgun_repo = https://github.com/inaka/shotgun
-pkg_shotgun_commit = 0.1.0
+pkg_shotgun_commit = master
 
 PACKAGES += sidejob
 pkg_sidejob_name = sidejob
@@ -3421,6 +3470,14 @@ pkg_skel_fetch = git
 pkg_skel_repo = https://github.com/ParaPhrase/skel
 pkg_skel_commit = master
 
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
 PACKAGES += smother
 pkg_smother_name = smother
 pkg_smother_description = Extended code coverage metrics for Erlang.
@@ -3533,6 +3590,14 @@ pkg_stripe_fetch = git
 pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
 pkg_stripe_commit = v1
 
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
 PACKAGES += surrogate
 pkg_surrogate_name = surrogate
 pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
@@ -3567,7 +3632,7 @@ pkg_switchboard_commit = master
 
 PACKAGES += syn
 pkg_syn_name = syn
-pkg_syn_description = A global process registry for Erlang.
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
 pkg_syn_homepage = https://github.com/ostinelli/syn
 pkg_syn_fetch = git
 pkg_syn_repo = https://github.com/ostinelli/syn
@@ -3739,7 +3804,7 @@ pkg_unicorn_description = Generic configuration server
 pkg_unicorn_homepage = https://github.com/shizzard/unicorn
 pkg_unicorn_fetch = git
 pkg_unicorn_repo = https://github.com/shizzard/unicorn
-pkg_unicorn_commit = 0.3.0
+pkg_unicorn_commit = master
 
 PACKAGES += unsplit
 pkg_unsplit_name = unsplit
@@ -3755,7 +3820,7 @@ pkg_uuid_description = Erlang UUID Implementation
 pkg_uuid_homepage = https://github.com/okeuday/uuid
 pkg_uuid_fetch = git
 pkg_uuid_repo = https://github.com/okeuday/uuid
-pkg_uuid_commit = v1.4.0
+pkg_uuid_commit = master
 
 PACKAGES += ux
 pkg_ux_name = ux
@@ -3875,7 +3940,7 @@ pkg_worker_pool_description = a simple erlang worker pool
 pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
 pkg_worker_pool_fetch = git
 pkg_worker_pool_repo = https://github.com/inaka/worker_pool
-pkg_worker_pool_commit = 1.0.3
+pkg_worker_pool_commit = master
 
 PACKAGES += wrangler
 pkg_wrangler_name = wrangler
@@ -3907,7 +3972,7 @@ pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
 pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
 pkg_xref_runner_fetch = git
 pkg_xref_runner_repo = https://github.com/inaka/xref_runner
-pkg_xref_runner_commit = 0.2.0
+pkg_xref_runner_commit = master
 
 PACKAGES += yamerl
 pkg_yamerl_name = yamerl
@@ -3941,13 +4006,21 @@ pkg_zab_engine_fetch = git
 pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
 pkg_zab_engine_commit = master
 
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
 PACKAGES += zeta
 pkg_zeta_name = zeta
 pkg_zeta_description = HTTP access log parser in Erlang
 pkg_zeta_homepage = https://github.com/s1n4/zeta
 pkg_zeta_fetch = git
 pkg_zeta_repo = https://github.com/s1n4/zeta
-pkg_zeta_commit =  
+pkg_zeta_commit = master
 
 PACKAGES += zippers
 pkg_zippers_name = zippers
@@ -4063,6 +4136,9 @@ deps::
 else
 deps:: $(ALL_DEPS_DIRS)
 ifndef IS_APP
+       $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+               mkdir -p $$dep/ebin; \
+       done
        $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
                $(MAKE) -C $$dep IS_APP=1 || exit $$?; \
        done
@@ -4092,7 +4168,10 @@ endif
 # While Makefile file could be GNUmakefile or makefile,
 # in practice only Makefile is needed so far.
 define dep_autopatch
-       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+               $(call dep_autopatch_erlang_mk,$(1)); \
+       elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
                if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
                        $(call dep_autopatch2,$(1)); \
                elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
@@ -4100,12 +4179,7 @@ define dep_autopatch
                elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
                        $(call dep_autopatch2,$(1)); \
                else \
-                       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
-                               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-                               $(call dep_autopatch_erlang_mk,$(1)); \
-                       else \
-                               $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
-                       fi \
+                       $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
                fi \
        else \
                if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
@@ -4117,8 +4191,11 @@ define dep_autopatch
 endef
 
 define dep_autopatch2
+       if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+       fi; \
        $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-       if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
                $(call dep_autopatch_fetch_rebar); \
                $(call dep_autopatch_rebar,$(1)); \
        else \
@@ -4256,57 +4333,6 @@ define dep_autopatch_rebar.erl
                                Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
                end
        end(),
-       FindFirst = fun(F, Fd) ->
-               case io:parse_erl_form(Fd, undefined) of
-                       {ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
-                               [PT, F(F, Fd)];
-                       {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
-                               case proplists:get_value(parse_transform, CompileOpts) of
-                                       undefined -> [F(F, Fd)];
-                                       PT -> [PT, F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, include, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ ->
-                                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
-                                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                                       _ -> [F(F, Fd)]
-                                               end
-                               end;
-                       {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
-                               {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
-                               [F(F, HrlFd), F(F, Fd)];
-                       {ok, {attribute, _, include_lib, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, import, {Imp, _}}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
-                                       {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {eof, _} ->
-                               file:close(Fd),
-                               [];
-                       _ ->
-                               F(F, Fd)
-               end
-       end,
-       fun() ->
-               ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
-               First0 = lists:usort(lists:flatten([begin
-                       {ok, Fd} = file:open(F, [read]),
-                       FindFirst(FindFirst, Fd)
-               end || F <- ErlFiles])),
-               First = lists:flatten([begin
-                       {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
-                       FindFirst(FindFirst, Fd)
-               end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
-               Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
-                       lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
-       end(),
        Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
        Write("\npreprocess::\n"),
        Write("\npre-deps::\n"),
@@ -4374,9 +4400,9 @@ define dep_autopatch_rebar.erl
                [] -> ok;
                _ ->
                        Write("\npre-app::\n\t$$\(MAKE) -f c_src/Makefile.erlang.mk\n"),
-                       PortSpecWrite(io_lib:format("ERL_CFLAGS = -finline-functions -Wall -fPIC -I ~s/erts-~s/include -I ~s\n",
+                       PortSpecWrite(io_lib:format("ERL_CFLAGS = -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
                                [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
-                       PortSpecWrite(io_lib:format("ERL_LDFLAGS = -L ~s -lerl_interface -lei\n",
+                       PortSpecWrite(io_lib:format("ERL_LDFLAGS = -L \\"~s\\" -lerl_interface -lei\n",
                                [code:lib_dir(erl_interface, lib)])),
                        [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
                        FilterEnv = fun(Env) ->
@@ -4419,9 +4445,10 @@ define dep_autopatch_rebar.erl
                                        Output, ": $$\(foreach ext,.c .C .cc .cpp,",
                                                "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
                                        "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
-                                       case filename:extension(Output) of
-                                               [] -> "\n";
-                                               _ -> " -shared\n"
+                                       case {filename:extension(Output), $(PLATFORM)} of
+                                           {[], _} -> "\n";
+                                           {_, darwin} -> "\n";
+                                           _ -> " -shared\n"
                                        end])
                        end,
                        [PortSpec(S) || S <- PortSpecs]
@@ -4490,6 +4517,15 @@ define dep_autopatch_app.erl
        halt()
 endef
 
+define dep_autopatch_appsrc_script.erl
+       AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+       AppSrcScript = AppSrc ++ ".script",
+       Bindings = erl_eval:new_bindings(),
+       {ok, Conf} = file:script(AppSrcScript, Bindings),
+       ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+       halt()
+endef
+
 define dep_autopatch_appsrc.erl
        AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
        AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
@@ -4576,10 +4612,11 @@ $(DEPS_DIR)/$(call dep_name,$1):
                exit 17; \
        fi
        $(verbose) mkdir -p $(DEPS_DIR)
-       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
-       $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
-               echo " AUTO  " $(DEP_STR); \
-               cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+       $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+                       && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+               echo " AUTO  " $(1); \
+               cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
        fi
        - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
                echo " CONF  " $(DEP_STR); \
@@ -4664,6 +4701,7 @@ $(foreach p,$(DEP_PLUGINS),\
 DTL_FULL_PATH ?=
 DTL_PATH ?= templates/
 DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
 
 # Verbosity.
 
@@ -4672,28 +4710,10 @@ dtl_verbose = $(dtl_verbose_$(V))
 
 # Core targets.
 
-define erlydtl_compile.erl
-       [begin
-               Module0 = case "$(strip $(DTL_FULL_PATH))" of
-                       "" ->
-                               filename:basename(F, ".dtl");
-                       _ ->
-                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
-                               re:replace(F2, "/",  "_",  [{return, list}, global])
-               end,
-               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
-               case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
-                       ok -> ok;
-                       {ok, _} -> ok
-               end
-       end || F <- string:tokens("$(1)", " ")],
-       halt().
-endef
-
-ifneq ($(wildcard src/),)
-
 DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
 
+ifneq ($(DTL_FILES),)
+
 ifdef DTL_FULL_PATH
 BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
 else
@@ -4701,7 +4721,7 @@ BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES
 endif
 
 ifneq ($(words $(DTL_FILES)),0)
-# Rebuild everything when the Makefile changes.
+# Rebuild templates when the Makefile changes.
 $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
        @mkdir -p $(ERLANG_MK_TMP)
        @if test -f $@; then \
@@ -4712,9 +4732,28 @@ $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
 ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
 endif
 
-ebin/$(PROJECT).app:: $(DTL_FILES)
+define erlydtl_compile.erl
+       [begin
+               Module0 = case "$(strip $(DTL_FULL_PATH))" of
+                       "" ->
+                               filename:basename(F, ".dtl");
+                       _ ->
+                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+                               re:replace(F2, "/",  "_",  [{return, list}, global])
+               end,
+               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+               case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+                       ok -> ok;
+                       {ok, _} -> ok
+               end
+       end || F <- string:tokens("$(1)", " ")],
+       halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
        $(if $(strip $?),\
-               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?),-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/))
+
 endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
@@ -4810,7 +4849,7 @@ app:: clean deps $(PROJECT).d
        $(verbose) $(MAKE) --no-print-directory app-build
 endif
 
-ifeq ($(wildcard src/$(PROJECT)_app.erl),)
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
 define app_file
 {application, $(PROJECT), [
        {description, "$(PROJECT_DESCRIPTION)"},
@@ -4830,7 +4869,7 @@ define app_file
        {modules, [$(call comma_list,$(2))]},
        {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
        {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]},
-       {mod, {$(PROJECT)_app, []}}
+       {mod, {$(PROJECT_MOD), []}}
 ]}.
 endef
 endif
@@ -4888,51 +4927,79 @@ $(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
 # Erlang and Core Erlang files.
 
 define makedep.erl
+       E = ets:new(makedep, [bag]),
+       G = digraph:new([acyclic]),
        ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
-       Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
-       Add = fun (Dep, Acc) ->
-               case lists:keyfind(atom_to_list(Dep), 1, Modules) of
-                       {_, DepFile} -> [DepFile|Acc];
-                       false -> Acc
+       Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+       Add = fun (Mod, Dep) ->
+               case lists:keyfind(Dep, 1, Modules) of
+                       false -> ok;
+                       {_, DepFile} ->
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile}),
+                               digraph:add_vertex(G, Mod),
+                               digraph:add_vertex(G, Dep),
+                               digraph:add_edge(G, Mod, Dep)
                end
        end,
-       AddHd = fun (Dep, Acc) ->
-               case {Dep, lists:keymember(Dep, 2, Modules)} of
-                       {"src/" ++ _, false} -> [Dep|Acc];
-                       {"include/" ++ _, false} -> [Dep|Acc];
-                       _ -> Acc
+       AddHd = fun (F, Mod, DepFile) ->
+               case file:open(DepFile, [read]) of
+                       {error, enoent} -> ok;
+                       {ok, Fd} ->
+                               F(F, Fd, Mod),
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile})
                end
        end,
-       CompileFirst = fun (Deps) ->
-               First0 = [case filename:extension(D) of
-                       ".erl" -> filename:basename(D, ".erl");
-                       _ -> []
-               end || D <- Deps],
-               case lists:usort(First0) of
-                       [] -> [];
-                       [[]] -> [];
-                       First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
-               end
+       Attr = fun
+               (F, Mod, behavior, Dep) -> Add(Mod, Dep);
+               (F, Mod, behaviour, Dep) -> Add(Mod, Dep);
+               (F, Mod, compile, {parse_transform, Dep}) -> Add(Mod, Dep);
+               (F, Mod, compile, Opts) when is_list(Opts) ->
+                       case proplists:get_value(parse_transform, Opts) of
+                               undefined -> ok;
+                               Dep -> Add(Mod, Dep)
+                       end;
+               (F, Mod, include, Hrl) ->
+                       case filelib:is_file("include/" ++ Hrl) of
+                               true -> AddHd(F, Mod, "include/" ++ Hrl);
+                               false ->
+                                       case filelib:is_file("src/" ++ Hrl) of
+                                               true -> AddHd(F, Mod, "src/" ++ Hrl);
+                                               false -> false
+                                       end
+                       end;
+               (F, Mod, include_lib, "$1/include/" ++ Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, include_lib, Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, import, {Imp, _}) ->
+                       case filelib:is_file("src/" ++ atom_to_list(Imp) ++ ".erl") of
+                               false -> ok;
+                               true -> Add(Mod, Imp)
+                       end;
+               (_, _, _, _) -> ok
        end,
-       Depend = [begin
-               case epp:parse_file(F, ["include/"], []) of
-                       {ok, Forms} ->
-                               Deps = lists:usort(lists:foldl(fun
-                                       ({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
-                                       (_, Acc) -> Acc
-                               end, [], Forms)),
-                               case Deps of
-                                       [] -> "";
-                                       _ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
-                               end;
-                       {error, enoent} ->
-                               []
+       MakeDepend = fun(F, Fd, Mod) ->
+               case io:parse_erl_form(Fd, undefined) of
+                       {ok, {attribute, _, Key, Value}, _} ->
+                               Attr(F, Mod, Key, Value),
+                               F(F, Fd, Mod);
+                       {eof, _} ->
+                               file:close(Fd);
+                       _ ->
+                               F(F, Fd, Mod)
                end
+       end,
+       [begin
+               Mod = list_to_atom(filename:basename(F, ".erl")),
+               {ok, Fd} = file:open(F, [read]),
+               MakeDepend(MakeDepend, Fd, Mod)
        end || F <- ErlFiles],
-       ok = file:write_file("$(1)", Depend),
+       Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+       CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+       ok = file:write_file("$(1)", [
+               [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+               "\nCOMPILE_FIRST +=", [[" ", atom_to_list(CF)] || CF <- CompileFirst], "\n"
+       ]),
        halt()
 endef
 
@@ -4977,13 +5044,13 @@ ifeq ($(wildcard src/$(PROJECT).app.src),)
        $(app_verbose) printf "$(subst $(newline),\n,$(subst ",\",$(call app_file,$(GITDESCRIBE),$(MODULES))))" \
                > ebin/$(PROJECT).app
 else
-       $(verbose) if [ -z "$$(grep -E '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+       $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
                echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk README for instructions." >&2; \
                exit 1; \
        fi
        $(appsrc_verbose) cat src/$(PROJECT).app.src \
                | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
-               | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(GITDESCRIBE)\"}/" \
+               | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
                > ebin/$(PROJECT).app
 endif
 
@@ -5069,6 +5136,11 @@ test-dir:
                $(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
 endif
 
+ifeq ($(wildcard src),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps
+       $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+else
 ifeq ($(wildcard ebin/test),)
 test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
 test-build:: clean deps test-deps $(PROJECT).d
@@ -5086,6 +5158,7 @@ clean-test-dir:
 ifneq ($(wildcard $(TEST_DIR)/*.beam),)
        $(gen_verbose) rm -f $(TEST_DIR)/*.beam
 endif
+endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5095,7 +5168,7 @@ endif
 # We strip out -Werror because we don't want to fail due to
 # warnings when used as a dependency.
 
-compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/')
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
 
 define compat_convert_erlc_opts
 $(if $(filter-out -Werror,$1),\
@@ -5103,11 +5176,18 @@ $(if $(filter-out -Werror,$1),\
                $(shell echo $1 | cut -b 2-)))
 endef
 
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
 define compat_rebar_config
-{deps, [$(call comma_list,$(foreach d,$(DEPS),\
-       {$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
-{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
-       $(call compat_convert_erlc_opts,$o)))]}.
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+       $(if $(filter hex,$(call dep_fetch,$d)),\
+               {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+               {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
 endef
 
 $(eval _compat_rebar_config = $$(compat_rebar_config))
@@ -5126,12 +5206,12 @@ MAN_SECTIONS ?= 3 7
 
 docs:: asciidoc
 
-asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+asciidoc: asciidoc-guide asciidoc-manual
 
 ifeq ($(wildcard doc/src/guide/book.asciidoc),)
 asciidoc-guide:
 else
-asciidoc-guide:
+asciidoc-guide: distclean-asciidoc doc-deps
        a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
        a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
 endif
@@ -5139,7 +5219,7 @@ endif
 ifeq ($(wildcard doc/src/manual/*.asciidoc),)
 asciidoc-manual:
 else
-asciidoc-manual:
+asciidoc-manual: distclean-asciidoc doc-deps
        for f in doc/src/manual/*.asciidoc ; do \
                a2x -v -f manpage $$f ; \
        done
@@ -5154,7 +5234,7 @@ install-docs:: install-asciidoc
 install-asciidoc: asciidoc-manual
        for s in $(MAN_SECTIONS); do \
                mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
-               install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+               install -g `id -u` -o `id -g` -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
        done
 endif
 
@@ -5176,8 +5256,8 @@ help::
                "  bootstrap          Generate a skeleton of an OTP application" \
                "  bootstrap-lib      Generate a skeleton of an OTP library" \
                "  bootstrap-rel      Generate the files needed to build a release" \
-               "  new-app n=NAME     Create a new local OTP application NAME" \
-               "  new-lib n=NAME     Create a new local OTP library NAME" \
+               "  new-app in=NAME    Create a new local OTP application NAME" \
+               "  new-lib in=NAME    Create a new local OTP library NAME" \
                "  new t=TPL n=NAME   Generate a module NAME based on the template TPL" \
                "  new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
                "  list-templates     List available templates"
@@ -5214,6 +5294,8 @@ define bs_appsrc_lib
 ]}.
 endef
 
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
 ifdef SP
 define bs_Makefile
 PROJECT = $p
@@ -5223,17 +5305,21 @@ PROJECT_VERSION = 0.0.1
 # Whitespace to be used when creating files from templates.
 SP = $(SP)
 
-include erlang.mk
 endef
 else
 define bs_Makefile
 PROJECT = $p
-include erlang.mk
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 endef
 endif
 
 define bs_apps_Makefile
 PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
 endef
 
@@ -5331,6 +5417,11 @@ code_change(_OldVsn, State, _Extra) ->
        {ok, State}.
 endef
 
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
 define tpl_cowboy_http
 -module($(n)).
 -behaviour(cowboy_http_handler).
@@ -5527,6 +5618,7 @@ endif
        $(eval p := $(PROJECT))
        $(eval n := $(PROJECT)_sup)
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc,src/$(PROJECT).app.src)
@@ -5540,6 +5632,7 @@ ifneq ($(wildcard src/),)
 endif
        $(eval p := $(PROJECT))
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
@@ -5620,12 +5713,33 @@ list-templates:
 
 C_SRC_DIR ?= $(CURDIR)/c_src
 C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
-C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
 C_SRC_TYPE ?= shared
 
 # System type and C compiler/flags.
 
-ifeq ($(PLATFORM),darwin)
+ifeq ($(PLATFORM),msys2)
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+       CC = /mingw64/bin/gcc
+       export CC
+       CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+       CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
        CC ?= cc
        CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
        CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
@@ -5640,10 +5754,15 @@ else ifeq ($(PLATFORM),linux)
        CXXFLAGS ?= -O3 -finline-functions -Wall
 endif
 
-CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
-CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+ifneq ($(PLATFORM),msys2)
+       CFLAGS += -fPIC
+       CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
 
-LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lerl_interface -lei
 
 # Verbosity.
 
@@ -5680,15 +5799,15 @@ OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
 COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
 COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
 
-app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-$(C_SRC_OUTPUT): $(OBJECTS)
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
        $(verbose) mkdir -p priv/
        $(link_verbose) $(CC) $(OBJECTS) \
                $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
-               -o $(C_SRC_OUTPUT)
+               -o $(C_SRC_OUTPUT_FILE)
 
 %.o: %.c
        $(COMPILE_C) $(OUTPUT_OPTION) $<
@@ -5705,13 +5824,13 @@ $(C_SRC_OUTPUT): $(OBJECTS)
 clean:: clean-c_src
 
 clean-c_src:
-       $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+       $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
 
 endif
 
 ifneq ($(wildcard $(C_SRC_DIR)),)
 $(C_SRC_ENV):
-       $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+       $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
                io_lib:format( \
                        \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
                        \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
@@ -5889,7 +6008,7 @@ endif
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
-.PHONY: ct distclean-ct
+.PHONY: ct apps-ct distclean-ct
 
 # Configuration.
 
@@ -5919,22 +6038,44 @@ help::
 CT_RUN = ct_run \
        -no_auto_compile \
        -noinput \
-       -pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(TEST_DIR) \
+       -pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin $(TEST_DIR) \
        -dir $(TEST_DIR) \
        -logdir $(CURDIR)/logs
 
 ifeq ($(CT_SUITES),)
-ct:
+ct: $(if $(IS_APP),,apps-ct)
 else
-ct: test-build
+ct: test-build $(if $(IS_APP),,apps-ct)
        $(verbose) mkdir -p $(CURDIR)/logs/
-       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+       $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1:
+       $(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: test-build $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+ifndef t
+CT_EXTRA =
+else
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
 endif
 
 define ct_suite_target
 ct-$(1): test-build
        $(verbose) mkdir -p $(CURDIR)/logs/
-       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS)
+       $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
 endef
 
 $(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
@@ -5953,9 +6094,8 @@ DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
 export DIALYZER_PLT
 
 PLT_APPS ?=
-DIALYZER_DIRS ?= --src -r src
-DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
-       -Wunmatched_returns # -Wunderspecs
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
 
 # Core targets.
 
@@ -5971,6 +6111,18 @@ help::
 
 # Plugin-specific targets.
 
+define filter_opts.erl
+       Opts = binary:split(<<"$1">>, <<"-">>, [global]),
+       Filtered = lists:reverse(lists:foldl(fun
+               (O = <<"pa ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"D ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"I ", _/bits>>, Acc) -> [O|Acc];
+               (_, Acc) -> Acc
+       end, [], Opts)),
+       io:format("~s~n", [[["-", O] || O <- Filtered]]),
+       halt().
+endef
+
 $(DIALYZER_PLT): deps app
        $(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
 
@@ -5984,7 +6136,7 @@ dialyze:
 else
 dialyze: $(DIALYZER_PLT)
 endif
-       $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+       $(verbose) dialyzer --no_native `$(call erlang,$(call filter_opts.erl,$(ERLC_OPTS)))` $(DIALYZER_DIRS) $(DIALYZER_OPTS)
 
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5997,58 +6149,20 @@ EDOC_OPTS ?=
 
 # Core targets.
 
-docs:: distclean-edoc edoc
+ifneq ($(wildcard doc/overview.edoc),)
+docs:: edoc
+endif
 
 distclean:: distclean-edoc
 
 # Plugin-specific targets.
 
-edoc: doc-deps
+edoc: distclean-edoc doc-deps
        $(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().'
 
 distclean-edoc:
        $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info
 
-# Copyright (c) 2015, Erlang Solutions Ltd.
-# This file is part of erlang.mk and subject to the terms of the ISC License.
-
-.PHONY: elvis distclean-elvis
-
-# Configuration.
-
-ELVIS_CONFIG ?= $(CURDIR)/elvis.config
-
-ELVIS ?= $(CURDIR)/elvis
-export ELVIS
-
-ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
-ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
-ELVIS_OPTS ?=
-
-# Core targets.
-
-help::
-       $(verbose) printf "%s\n" "" \
-               "Elvis targets:" \
-               "  elvis       Run Elvis using the local elvis.config or download the default otherwise"
-
-distclean:: distclean-elvis
-
-# Plugin-specific targets.
-
-$(ELVIS):
-       $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
-       $(verbose) chmod +x $(ELVIS)
-
-$(ELVIS_CONFIG):
-       $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
-
-elvis: $(ELVIS) $(ELVIS_CONFIG)
-       $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
-
-distclean-elvis:
-       $(gen_verbose) rm -rf $(ELVIS)
-
 # Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
@@ -6057,6 +6171,8 @@ distclean-elvis:
 # Configuration.
 
 ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
 ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
 
 ESCRIPT_BEAMS ?= "ebin/*", "deps/*/ebin/*"
@@ -6102,7 +6218,7 @@ define ESCRIPT_RAW
 '  ]),'\
 '  file:change_mode(Escript, 8#755)'\
 'end,'\
-'Ez("$(ESCRIPT_NAME)"),'\
+'Ez("$(ESCRIPT_FILE)"),'\
 'halt().'
 endef
 
@@ -6114,6 +6230,75 @@ escript:: distclean-escript deps app
 distclean-escript:
        $(gen_verbose) rm -f $(ESCRIPT_NAME)
 
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "EUnit targets:" \
+               "  eunit       Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+       case "$(COVER)" of
+               "" -> ok;
+               _ ->
+                       case cover:compile_beam_directory("ebin") of
+                               {error, _} -> halt(1);
+                               _ -> ok
+                       end
+       end,
+       case eunit:test($1, [$(EUNIT_OPTS)]) of
+               ok -> ok;
+               error -> halt(2)
+       end,
+       case "$(COVER)" of
+               "" -> ok;
+               _ ->
+                       cover:export("eunit.coverdata")
+       end,
+       halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+       $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP),,apps-eunit)
+       $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit:
+       $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; done
+endif
+endif
+
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
@@ -6124,7 +6309,7 @@ distclean-escript:
 RELX ?= $(CURDIR)/relx
 RELX_CONFIG ?= $(CURDIR)/relx.config
 
-RELX_URL ?= https://github.com/erlware/relx/releases/download/v3.5.0/relx
+RELX_URL ?= https://github.com/erlware/relx/releases/download/v3.19.0/relx
 RELX_OPTS ?=
 RELX_OUTPUT_DIR ?= _rel
 
@@ -6392,7 +6577,8 @@ define cover_report.erl
                true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
        TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
        TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
-       TotalPerc = round(100 * TotalY / (TotalY + TotalN)),
+       Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+       TotalPerc = Perc(TotalY, TotalN),
        {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
        io:format(F, "<!DOCTYPE html><html>~n"
                "<head><meta charset=\"UTF-8\">~n"
@@ -6402,7 +6588,7 @@ define cover_report.erl
        io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
        [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
                "<td>~p%</td></tr>~n",
-               [M, M, round(100 * Y / (Y + N))]) || {M, {Y, N}} <- Report1],
+               [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
        How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
        Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
        io:format(F, "</table>~n"
diff --git a/rabbitmq-server/deps/rabbitmq_shovel_management/etc/rabbit-test.config b/rabbitmq-server/deps/rabbitmq_shovel_management/etc/rabbit-test.config
deleted file mode 100644 (file)
index a0546a2..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-%% We test sample retention separately in rabbit_mgmt_test_db_unit,
-%% but for rabbit_mgmt_test_db we want to make sure samples never
-%% expire.
-[
- {rabbitmq_shovel,
-  [{shovels,
-     [{'my-static',
-       [{sources, [{broker, "amqp://"},
-                   {declarations, [{'queue.declare', [{queue, <<"static">>}]}]}
-                  ]},
-         {destinations, [{broker, "amqp://"}]},
-         {queue, <<"static">>},
-         {publish_fields, [ {exchange, <<"">>},
-                            {routing_key, <<"static2">>}
-                          ]}
-       ]}
-      ]}
-    ]}
-].
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index ea72ba83fc4f176bb11cdd4cfea8edc812f13596..7c563155a917cbdd70b4d5008bf5b5634ea7a4cc 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_shovel_management,
  [{description, "Shovel Status"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {applications, [kernel, stdlib, rabbit, rabbitmq_management]}]}.
diff --git a/rabbitmq-server/deps/rabbitmq_shovel_management/test/http_SUITE.erl b/rabbitmq-server/deps/rabbitmq_shovel_management/test/http_SUITE.erl
new file mode 100644 (file)
index 0000000..f71fd12
--- /dev/null
@@ -0,0 +1,281 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ Management Console.
+%%
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
+%%
+
+-module(http_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbitmq_management/include/rabbit_mgmt_test.hrl").
+
+-compile(export_all).
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+          shovels,
+          dynamic_plugin_enable_disable
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1, [
+        fun configure_shovels/1,
+        fun start_inets/1
+      ] ++
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+configure_shovels(Config) ->
+    rabbit_ct_helpers:merge_app_env(Config,
+      {rabbitmq_shovel, [
+          {shovels,
+            [{'my-static',
+                [{sources, [
+                      {broker, "amqp://"},
+                      {declarations, [
+                          {'queue.declare', [{queue, <<"static">>}]}]}
+                    ]},
+                  {destinations, [{broker, "amqp://"}]},
+                  {queue, <<"static">>},
+                  {publish_fields, [
+                      {exchange, <<"">>},
+                      {routing_key, <<"static2">>}
+                    ]}
+                ]}
+            ]}
+        ]}).
+
+start_inets(Config) ->
+    ok = application:start(inets),
+    Config.
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+shovels(Config) ->
+    http_put(Config, "/users/admin",
+      [{password, <<"admin">>}, {tags, <<"administrator">>}], ?NO_CONTENT),
+    http_put(Config, "/users/mon",
+      [{password, <<"mon">>}, {tags, <<"monitoring">>}], ?NO_CONTENT),
+    http_put(Config, "/vhosts/v", none, ?NO_CONTENT),
+    Perms = [{configure, <<".*">>},
+             {write,     <<".*">>},
+             {read,      <<".*">>}],
+    http_put(Config, "/permissions/v/guest",  Perms, ?NO_CONTENT),
+    http_put(Config, "/permissions/v/admin",  Perms, ?NO_CONTENT),
+    http_put(Config, "/permissions/v/mon",    Perms, ?NO_CONTENT),
+
+    [http_put(Config, "/parameters/shovel/" ++ V ++ "/my-dynamic",
+              [{value, [{'src-uri', <<"amqp://">>},
+                        {'dest-uri', <<"amqp://">>},
+                        {'src-queue', <<"test">>},
+                        {'dest-queue', <<"test2">>}]}], ?NO_CONTENT)
+     || V <- ["%2f", "v"]],
+    Static = [{name,  <<"my-static">>},
+              {type,  <<"static">>}],
+    Dynamic1 = [{name,  <<"my-dynamic">>},
+                {vhost, <<"/">>},
+                {type,  <<"dynamic">>}],
+    Dynamic2 = [{name,  <<"my-dynamic">>},
+                {vhost, <<"v">>},
+                {type,  <<"dynamic">>}],
+    Assert = fun (Req, User, Res) ->
+                     assert_list(Res, http_get(Config, Req, User, User, ?OK))
+             end,
+    Assert("/shovels",     "guest", [Static, Dynamic1, Dynamic2]),
+    Assert("/shovels/%2f", "guest", [Dynamic1]),
+    Assert("/shovels/v",   "guest", [Dynamic2]),
+    Assert("/shovels",     "admin", [Static, Dynamic2]),
+    Assert("/shovels/%2f", "admin", []),
+    Assert("/shovels/v",   "admin", [Dynamic2]),
+    Assert("/shovels",     "mon", [Dynamic2]),
+    Assert("/shovels/%2f", "mon", []),
+    Assert("/shovels/v",   "mon", [Dynamic2]),
+
+    http_delete(Config, "/vhosts/v", ?NO_CONTENT),
+    http_delete(Config, "/users/admin", ?NO_CONTENT),
+    http_delete(Config, "/users/mon", ?NO_CONTENT),
+    ok.
+
+%% It's a bit arbitrary to be testing this here, but we want to be
+%% able to test that mgmt extensions can be started and stopped
+%% *somewhere*, and here is as good a place as any.
+dynamic_plugin_enable_disable(Config) ->
+    http_get(Config, "/shovels", ?OK),
+    rabbit_ct_broker_helpers:disable_plugin(Config, 0,
+      "rabbitmq_shovel_management"),
+    http_get(Config, "/shovels", ?NOT_FOUND),
+    http_get(Config, "/overview", ?OK),
+    rabbit_ct_broker_helpers:disable_plugin(Config, 0,
+      "rabbitmq_management"),
+    http_fail(Config, "/shovels"),
+    http_fail(Config, "/overview"),
+    rabbit_ct_broker_helpers:enable_plugin(Config, 0,
+      "rabbitmq_management"),
+    http_get(Config, "/shovels", ?NOT_FOUND),
+    http_get(Config, "/overview", ?OK),
+    rabbit_ct_broker_helpers:enable_plugin(Config, 0,
+      "rabbitmq_shovel_management"),
+    http_get(Config, "/shovels", ?OK),
+    http_get(Config, "/overview", ?OK),
+    passed.
+
+%%---------------------------------------------------------------------------
+%% TODO this is mostly copypasta from the mgmt tests
+
+http_get(Config, Path) ->
+    http_get(Config, Path, ?OK).
+
+http_get(Config, Path, CodeExp) ->
+    http_get(Config, Path, "guest", "guest", CodeExp).
+
+http_get(Config, Path, User, Pass, CodeExp) ->
+    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+        req(Config, get, Path, [auth_header(User, Pass)]),
+    assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
+    decode(CodeExp, Headers, ResBody).
+
+http_fail(Config, Path) ->
+    {error, {failed_connect, _}} = req(Config, get, Path, []).
+
+http_put(Config, Path, List, CodeExp) ->
+    http_put_raw(Config, Path, format_for_upload(List), CodeExp).
+
+http_put(Config, Path, List, User, Pass, CodeExp) ->
+    http_put_raw(Config, Path, format_for_upload(List), User, Pass, CodeExp).
+
+http_post(Config, Path, List, CodeExp) ->
+    http_post_raw(Config, Path, format_for_upload(List), CodeExp).
+
+http_post(Config, Path, List, User, Pass, CodeExp) ->
+    http_post_raw(Config, Path, format_for_upload(List), User, Pass, CodeExp).
+
+format_for_upload(none) ->
+    <<"">>;
+format_for_upload(List) ->
+    iolist_to_binary(mochijson2:encode({struct, List})).
+
+http_put_raw(Config, Path, Body, CodeExp) ->
+    http_upload_raw(Config, put, Path, Body, "guest", "guest", CodeExp).
+
+http_put_raw(Config, Path, Body, User, Pass, CodeExp) ->
+    http_upload_raw(Config, put, Path, Body, User, Pass, CodeExp).
+
+http_post_raw(Config, Path, Body, CodeExp) ->
+    http_upload_raw(Config, post, Path, Body, "guest", "guest", CodeExp).
+
+http_post_raw(Config, Path, Body, User, Pass, CodeExp) ->
+    http_upload_raw(Config, post, Path, Body, User, Pass, CodeExp).
+
+http_upload_raw(Config, Type, Path, Body, User, Pass, CodeExp) ->
+    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+        req(Config, Type, Path, [auth_header(User, Pass)], Body),
+    assert_code(CodeExp, CodeAct, Type, Path, ResBody),
+    decode(CodeExp, Headers, ResBody).
+
+http_delete(Config, Path, CodeExp) ->
+    http_delete(Config, Path, "guest", "guest", CodeExp).
+
+http_delete(Config, Path, User, Pass, CodeExp) ->
+    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
+        req(Config, delete, Path, [auth_header(User, Pass)]),
+    assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody),
+    decode(CodeExp, Headers, ResBody).
+
+assert_code(CodeExp, CodeAct, _Type, _Path, _Body) ->
+    ?assertEqual(CodeExp, CodeAct).
+
+req_uri(Config, Path) ->
+    rabbit_misc:format("~s/api~s", [
+        rabbit_ct_broker_helpers:node_uri(Config, 0, management),
+        Path
+      ]).
+
+req(Config, Type, Path, Headers) ->
+    httpc:request(Type,
+      {req_uri(Config, Path), Headers},
+      ?HTTPC_OPTS, []).
+
+req(Config, Type, Path, Headers, Body) ->
+    httpc:request(Type,
+      {req_uri(Config, Path), Headers, "application/json", Body},
+      ?HTTPC_OPTS, []).
+
+decode(?OK, _Headers,  ResBody) -> cleanup(mochijson2:decode(ResBody));
+decode(_,    Headers, _ResBody) -> Headers.
+
+cleanup(L) when is_list(L) ->
+    [cleanup(I) || I <- L];
+cleanup({struct, I}) ->
+    cleanup(I);
+cleanup({K, V}) when is_binary(K) ->
+    {list_to_atom(binary_to_list(K)), cleanup(V)};
+cleanup(I) ->
+    I.
+
+auth_header(Username, Password) ->
+    {"Authorization",
+     "Basic " ++ binary_to_list(base64:encode(Username ++ ":" ++ Password))}.
+
+assert_list(Exp, Act) ->
+    ?assertEqual(length(Exp), length(Act)),
+    [?assertEqual(1,
+        length(lists:filter(
+            fun(ActI) -> test_item(ExpI, ActI) end, Act)))
+      || ExpI <- Exp].
+
+assert_item(Exp, Act) ->
+    ?assertEqual([], test_item0(Exp, Act)).
+
+test_item(Exp, Act) ->
+    case test_item0(Exp, Act) of
+        [] -> true;
+        _  -> false
+    end.
+
+test_item0(Exp, Act) ->
+    [{did_not_find, ExpI, in, Act} || ExpI <- Exp,
+                                      not lists:member(ExpI, Act)].
diff --git a/rabbitmq-server/deps/rabbitmq_shovel_management/test/src/rabbit_shovel_mgmt_test_all.erl b/rabbitmq-server/deps/rabbitmq_shovel_management/test/src/rabbit_shovel_mgmt_test_all.erl
deleted file mode 100644 (file)
index b82c4e1..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
-%%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
-%%
-%%   The Original Code is RabbitMQ Management Console.
-%%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(rabbit_shovel_mgmt_test_all).
-
--export([all_tests/0]).
-
-all_tests() ->
-    ok = eunit:test(tests(rabbit_shovel_mgmt_test_http, 60), [verbose]).
-
-tests(Module, Timeout) ->
-    {foreach, fun() -> ok end,
-     [{timeout, Timeout, fun () -> Module:F() end} ||
-         {F, _Arity} <- proplists:get_value(exports, Module:module_info()),
-         string:right(atom_to_list(F), 5) =:= "_test"]}.
diff --git a/rabbitmq-server/deps/rabbitmq_shovel_management/test/src/rabbit_shovel_mgmt_test_http.erl b/rabbitmq-server/deps/rabbitmq_shovel_management/test/src/rabbit_shovel_mgmt_test_http.erl
deleted file mode 100644 (file)
index b3407ce..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
-%%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
-%%
-%%   The Original Code is RabbitMQ Management Console.
-%%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2010-2014 GoPivotal, Inc.  All rights reserved.
-%%
-
--module(rabbit_shovel_mgmt_test_http).
-
--include_lib("rabbitmq_management/include/rabbit_mgmt_test.hrl").
-
--import(rabbit_misc, [pget/2]).
-
-shovels_test() ->
-    http_put("/users/admin",  [{password, <<"admin">>},
-                               {tags, <<"administrator">>}], ?NO_CONTENT),
-    http_put("/users/mon",    [{password, <<"mon">>},
-                               {tags, <<"monitoring">>}], ?NO_CONTENT),
-    http_put("/vhosts/v", none, ?NO_CONTENT),
-    Perms = [{configure, <<".*">>},
-             {write,     <<".*">>},
-             {read,      <<".*">>}],
-    http_put("/permissions/v/guest",  Perms, ?NO_CONTENT),
-    http_put("/permissions/v/admin",  Perms, ?NO_CONTENT),
-    http_put("/permissions/v/mon",    Perms, ?NO_CONTENT),
-
-    [http_put("/parameters/shovel/" ++ V ++ "/my-dynamic",
-              [{value, [{'src-uri', <<"amqp://">>},
-                        {'dest-uri', <<"amqp://">>},
-                        {'src-queue', <<"test">>},
-                        {'dest-queue', <<"test2">>}]}], ?NO_CONTENT)
-     || V <- ["%2f", "v"]],
-    Static = [{name,  <<"my-static">>},
-              {type,  <<"static">>}],
-    Dynamic1 = [{name,  <<"my-dynamic">>},
-                {vhost, <<"/">>},
-                {type,  <<"dynamic">>}],
-    Dynamic2 = [{name,  <<"my-dynamic">>},
-                {vhost, <<"v">>},
-                {type,  <<"dynamic">>}],
-    Assert = fun (Req, User, Res) ->
-                     assert_list(Res, http_get(Req, User, User, ?OK))
-             end,
-    Assert("/shovels",     "guest", [Static, Dynamic1, Dynamic2]),
-    Assert("/shovels/%2f", "guest", [Dynamic1]),
-    Assert("/shovels/v",   "guest", [Dynamic2]),
-    Assert("/shovels",     "admin", [Static, Dynamic2]),
-    Assert("/shovels/%2f", "admin", []),
-    Assert("/shovels/v",   "admin", [Dynamic2]),
-    Assert("/shovels",     "mon", [Dynamic2]),
-    Assert("/shovels/%2f", "mon", []),
-    Assert("/shovels/v",   "mon", [Dynamic2]),
-
-    http_delete("/vhosts/v", ?NO_CONTENT),
-    http_delete("/users/admin", ?NO_CONTENT),
-    http_delete("/users/mon", ?NO_CONTENT),
-    ok.
-
-%% It's a bit arbitrary to be testing this here, but we want to be
-%% able to test that mgmt extensions can be started and stopped
-%% *somewhere*, and here is as good a place as any.
-dynamic_plugin_enable_disable_test() ->
-    http_get("/shovels", ?OK),
-    disable_plugin("rabbitmq_shovel_management"),
-    http_get("/shovels", ?NOT_FOUND),
-    http_get("/overview", ?OK),
-    disable_plugin("rabbitmq_management"),
-    http_fail("/shovels"),
-    http_fail("/overview"),
-    enable_plugin("rabbitmq_management"),
-    http_get("/shovels", ?NOT_FOUND),
-    http_get("/overview", ?OK),
-    enable_plugin("rabbitmq_shovel_management"),
-    http_get("/shovels", ?OK),
-    http_get("/overview", ?OK),
-    passed.
-
-%%---------------------------------------------------------------------------
-%% TODO this is mostly copypasta from the mgmt tests
-
-http_get(Path) ->
-    http_get(Path, ?OK).
-
-http_get(Path, CodeExp) ->
-    http_get(Path, "guest", "guest", CodeExp).
-
-http_get(Path, User, Pass, CodeExp) ->
-    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
-        req(get, Path, [auth_header(User, Pass)]),
-    assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
-    decode(CodeExp, Headers, ResBody).
-
-http_fail(Path) ->
-    {error, {failed_connect, _}} = req(get, Path, []).
-
-http_put(Path, List, CodeExp) ->
-    http_put_raw(Path, format_for_upload(List), CodeExp).
-
-http_put(Path, List, User, Pass, CodeExp) ->
-    http_put_raw(Path, format_for_upload(List), User, Pass, CodeExp).
-
-http_post(Path, List, CodeExp) ->
-    http_post_raw(Path, format_for_upload(List), CodeExp).
-
-http_post(Path, List, User, Pass, CodeExp) ->
-    http_post_raw(Path, format_for_upload(List), User, Pass, CodeExp).
-
-format_for_upload(none) ->
-    <<"">>;
-format_for_upload(List) ->
-    iolist_to_binary(mochijson2:encode({struct, List})).
-
-http_put_raw(Path, Body, CodeExp) ->
-    http_upload_raw(put, Path, Body, "guest", "guest", CodeExp).
-
-http_put_raw(Path, Body, User, Pass, CodeExp) ->
-    http_upload_raw(put, Path, Body, User, Pass, CodeExp).
-
-http_post_raw(Path, Body, CodeExp) ->
-    http_upload_raw(post, Path, Body, "guest", "guest", CodeExp).
-
-http_post_raw(Path, Body, User, Pass, CodeExp) ->
-    http_upload_raw(post, Path, Body, User, Pass, CodeExp).
-
-http_upload_raw(Type, Path, Body, User, Pass, CodeExp) ->
-    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
-        req(Type, Path, [auth_header(User, Pass)], Body),
-    assert_code(CodeExp, CodeAct, Type, Path, ResBody),
-    decode(CodeExp, Headers, ResBody).
-
-http_delete(Path, CodeExp) ->
-    http_delete(Path, "guest", "guest", CodeExp).
-
-http_delete(Path, User, Pass, CodeExp) ->
-    {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
-        req(delete, Path, [auth_header(User, Pass)]),
-    assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody),
-    decode(CodeExp, Headers, ResBody).
-
-assert_code(CodeExp, CodeAct, Type, Path, Body) ->
-    case CodeExp of
-        CodeAct -> ok;
-        _       -> throw({expected, CodeExp, got, CodeAct, type, Type,
-                          path, Path, body, Body})
-    end.
-
-req(Type, Path, Headers) ->
-    httpc:request(Type, {?PREFIX ++ Path, Headers}, ?HTTPC_OPTS, []).
-
-req(Type, Path, Headers, Body) ->
-    httpc:request(Type, {?PREFIX ++ Path, Headers, "application/json", Body},
-                  ?HTTPC_OPTS, []).
-
-decode(?OK, _Headers,  ResBody) -> cleanup(mochijson2:decode(ResBody));
-decode(_,    Headers, _ResBody) -> Headers.
-
-cleanup(L) when is_list(L) ->
-    [cleanup(I) || I <- L];
-cleanup({struct, I}) ->
-    cleanup(I);
-cleanup({K, V}) when is_binary(K) ->
-    {list_to_atom(binary_to_list(K)), cleanup(V)};
-cleanup(I) ->
-    I.
-
-auth_header(Username, Password) ->
-    {"Authorization",
-     "Basic " ++ binary_to_list(base64:encode(Username ++ ":" ++ Password))}.
-
-assert_list(Exp, Act) ->
-    case length(Exp) == length(Act) of
-        true  -> ok;
-        false -> throw({expected, Exp, actual, Act})
-    end,
-    [case length(lists:filter(fun(ActI) -> test_item(ExpI, ActI) end, Act)) of
-         1 -> ok;
-         N -> throw({found, N, ExpI, in, Act})
-     end || ExpI <- Exp].
-
-assert_item(Exp, Act) ->
-    case test_item0(Exp, Act) of
-        [] -> ok;
-        Or -> throw(Or)
-    end.
-
-test_item(Exp, Act) ->
-    case test_item0(Exp, Act) of
-        [] -> true;
-        _  -> false
-    end.
-
-test_item0(Exp, Act) ->
-    [{did_not_find, ExpI, in, Act} || ExpI <- Exp,
-                                      not lists:member(ExpI, Act)].
-%%---------------------------------------------------------------------------
-
-enable_plugin(Plugin) ->
-    plugins_action(enable, [Plugin], []).
-
-disable_plugin(Plugin) ->
-    plugins_action(disable, [Plugin], []).
-
-plugins_action(Command, Args, Opts) ->
-    PluginsFile = os:getenv("RABBITMQ_ENABLED_PLUGINS_FILE"),
-    PluginsDir = os:getenv("RABBITMQ_PLUGINS_DIR"),
-    Node = node(),
-    rpc:call(Node, rabbit_plugins_main, action,
-             [Command, Node, Args, Opts, PluginsFile, PluginsDir]).
diff --git a/rabbitmq-server/deps/rabbitmq_stomp/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_stomp/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index 0f8486404d8bf18250cf33ba28c69842699bed22..6ae483e8427474b5e4a451e05dc64774383f78e8 100644 (file)
@@ -1,7 +1,7 @@
 PROJECT = rabbitmq_stomp
 
 DEPS = amqp_client
-TEST_DEPS = rabbitmq_test
+TEST_DEPS = rabbit rabbitmq_test
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -12,34 +12,9 @@ ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
 ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
+
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
+
 include erlang.mk
 
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
-
-WITH_BROKER_TEST_MAKEVARS := \
-       RABBITMQ_CONFIG_FILE=$(TEST_TMPDIR)/etc/test
-WITH_BROKER_TEST_ENVVARS := \
-       PYTHONPATH=$(CURDIR)/test/deps/pika/pika:$(CURDIR)/test/deps/stomppy/stomppy:$(PYTHONPATH) \
-       SSL_CERTS_PATH=$(TEST_TMPDIR)/etc/certs
-WITH_BROKER_TEST_SCRIPTS := \
-       test/src/test.py \
-       test/src/test_connect_options.py \
-       test/src/test_ssl.py
-WITH_BROKER_TEST_COMMANDS := \
-       rabbit_stomp_test:all_tests() \
-       rabbit_stomp_amqqueue_test:all_tests()
-
-STANDALONE_TEST_COMMANDS := \
-       eunit:test([rabbit_stomp_test_util,rabbit_stomp_test_frame],[verbose])
-
-pre-standalone-tests:: test-build test-tmpdir
-       $(verbose) rm -rf $(TEST_TMPDIR)/etc
-       $(exec_verbose) mkdir -p $(TEST_TMPDIR)/etc/certs
-       $(verbose) sed -E -e "s|%%CERTS_DIR%%|$(TEST_TMPDIR)/etc/certs|g" \
-               < test/src/test.config > $(TEST_TMPDIR)/etc/test.config
-       $(verbose) $(MAKE) -C $(DEPS_DIR)/rabbitmq_test/certs all PASSWORD=bunnychow \
-               DIR=$(TEST_TMPDIR)/etc/certs
-       $(verbose) $(MAKE) -C test/deps/stomppy
-       $(verbose) $(MAKE) -C test/deps/pika
+
index 423c9bc93f8665e68bdce4d2dc4251e5537d9b3a..455d31019e83b8d158983767977e7d5a9200032f 100644 (file)
@@ -16,6 +16,7 @@
 
 -record(stomp_configuration, {default_login,
                               default_passcode,
+                              force_default_creds = false,
                               implicit_connect,
                               ssl_cert_login}).
 
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 28b4a5f9a075040f157dee5c250ed1e4bc4d8d6a..e972a57285c3852557c3a3844fed3e50515ed214 100644 (file)
@@ -18,7 +18,8 @@
 -behaviour(supervisor2).
 -behaviour(ranch_protocol).
 
--define(MAX_WAIT, 16#ffffffff).
+-include_lib("rabbit_common/include/rabbit.hrl").
+
 -export([start_link/4, init/1]).
 
 start_link(Ref, Sock, _Transport, Configuration) ->
@@ -39,7 +40,7 @@ start_link(Ref, Sock, _Transport, Configuration) ->
                         {rabbit_stomp_reader,
                          {rabbit_stomp_reader,
                           start_link, [HelperPid, Ref, Sock, Configuration]},
-                         intrinsic, ?MAX_WAIT, worker,
+                         intrinsic, ?WORKER_WAIT, worker,
                          [rabbit_stomp_reader]}),
 
     {ok, SupPid, ReaderPid}.
index ebe2158f87636a79eabfc09f13e701ee596c1779..8fb68fa0179a8bd946cecd9a386476a35bd278a0 100644 (file)
@@ -17,9 +17,9 @@
 -module(rabbit_stomp_processor).
 
 -export([initial_state/2, process_frame/2, flush_and_die/1]).
--export([flush_pending_receipts/3, 
-         handle_exit/3, 
-         cancel_consumer/2, 
+-export([flush_pending_receipts/3,
+         handle_exit/3,
+         cancel_consumer/2,
          send_delivery/5]).
 
 -export([adapter_name/1]).
@@ -33,7 +33,7 @@
 -record(proc_state, {session_id, channel, connection, subscriptions,
                 version, start_heartbeat_fun, pending_receipts,
                 config, route_state, reply_queues, frame_transformer,
-                adapter_info, send_fun, receive_fun, ssl_login_name, peer_addr,
+                adapter_info, send_fun, ssl_login_name, peer_addr,
                 %% see rabbitmq/rabbitmq-stomp#39
                 trailing_lf}).
 
@@ -46,16 +46,13 @@ adapter_name(State) ->
   Name.
 
 %%----------------------------------------------------------------------------
--ifdef(use_spec).
 
 -spec initial_state(
-  #stomp_configuration{}, 
-  {SendFun, ReceiveFun, AdapterInfo, StartHeartbeatFun, SSLLoginName, PeerAddr})
+  #stomp_configuration{},
+  {SendFun, AdapterInfo, SSLLoginName, PeerAddr})
     -> #proc_state{}
   when SendFun :: fun((atom(), binary()) -> term()),
-       ReceiveFun :: fun(() -> ok),
        AdapterInfo :: #amqp_adapter_info{},
-       StartHeartbeatFun :: fun((non_neg_integer(), fun(), non_neg_integer(), fun()) -> term()),
        SSLLoginName :: atom() | binary(),
        PeerAddr :: inet:ip_address().
 
@@ -63,17 +60,17 @@ adapter_name(State) ->
     {ok, #proc_state{}} |
     {stop, term(), #proc_state{}}.
 
--spec process_frame(#stomp_frame{}, #proc_state{}) -> 
+-spec process_frame(#stomp_frame{}, #proc_state{}) ->
     process_frame_result().
 
 -spec flush_and_die(#proc_state{}) -> ok.
 
--spec command({Command, Frame}, State) -> process_frame_result() 
+-spec command({Command, Frame}, State) -> process_frame_result()
     when Command :: string(),
          Frame   :: #stomp_frame{},
          State   :: #proc_state{}.
 
--type process_fun() :: fun((#proc_state{}) -> 
+-type process_fun() :: fun((#proc_state{}) ->
         {ok, #stomp_frame{}, #proc_state{}}  |
         {error, string(), string(), #proc_state{}} |
         {stop, term(), #proc_state{}}).
@@ -92,10 +89,9 @@ adapter_name(State) ->
 
 -spec cancel_consumer(binary(), #proc_state{}) -> process_frame_result().
 
--spec send_delivery(#'basic.deliver'{}, term(), term(), term(), 
+-spec send_delivery(#'basic.deliver'{}, term(), term(), term(),
                     #proc_state{}) -> #proc_state{}.
 
--endif.
 %%----------------------------------------------------------------------------
 
 
@@ -109,13 +105,22 @@ process_frame(Frame = #stomp_frame{command = Command}, State) ->
 flush_and_die(State) ->
     close_connection(State).
 
-initial_state(Configuration, 
-    {SendFun, ReceiveFun, AdapterInfo, StartHeartbeatFun, SSLLoginName, PeerAddr}) ->
+initial_state(Configuration,
+    {SendFun, AdapterInfo0 = #amqp_adapter_info{additional_info = Extra},
+     SSLLoginName, PeerAddr}) ->
+  %% STOMP connections use exactly one channel. The frame max is not
+  %% applicable and there is no way to know what client is used.
+  AdapterInfo = AdapterInfo0#amqp_adapter_info{additional_info=[
+       {channels, 1},
+       {channel_max, 1},
+       {frame_max, 0},
+       %% TODO: can we use a header to make it possible for clients
+       %%       to override this value?
+       {client_properties, [{<<"product">>, longstr, <<"STOMP client">>}]}
+       |Extra]},
   #proc_state {
        send_fun            = SendFun,
-       receive_fun         = ReceiveFun,
        adapter_info        = AdapterInfo,
-       start_heartbeat_fun = StartHeartbeatFun,
        ssl_login_name      = SSLLoginName,
        peer_addr           = PeerAddr,
        session_id          = none,
@@ -140,7 +145,7 @@ command({"CONNECT", Frame}, State) ->
 command(Request, State = #proc_state{channel = none,
                              config = #stomp_configuration{
                              implicit_connect = true}}) ->
-    {ok, State1 = #proc_state{channel = Ch}} =
+    {ok, State1 = #proc_state{channel = Ch}, _} =
         process_connect(implicit, #stomp_frame{headers = []}, State),
     case Ch of
         none -> {stop, normal, State1};
@@ -152,7 +157,7 @@ command(_Request, State = #proc_state{channel = none,
                               implicit_connect = false}}) ->
     {ok, send_error("Illegal command",
                     "You must log in using CONNECT first",
-                    State)};
+                    State), none};
 
 command({Command, Frame}, State = #proc_state{frame_transformer = FT}) ->
     Frame1 = FT(Frame),
@@ -168,7 +173,7 @@ command({Command, Frame}, State = #proc_state{frame_transformer = FT}) ->
 
 cancel_consumer(Ctag, State) ->
   process_request(
-    fun(StateN) -> server_cancel_consumer(Ctag, StateN) end, 
+    fun(StateN) -> server_cancel_consumer(Ctag, StateN) end,
     State).
 
 handle_exit(Conn, {shutdown, {server_initiated_close, Code, Explanation}},
@@ -182,6 +187,10 @@ handle_exit(Conn, Reason, State = #proc_state{connection = Conn}) ->
     send_error("AMQP connection died", "Reason: ~p", [Reason], State),
     {stop, {conn_died, Reason}, State};
 
+handle_exit(Ch, {shutdown, {server_initiated_close, Code, Explanation}},
+            State = #proc_state{channel = Ch}) ->
+    amqp_death(Code, Explanation, State);
+
 handle_exit(Ch, Reason, State = #proc_state{channel = Ch}) ->
     send_error("AMQP channel died", "Reason: ~p", [Reason], State),
     {stop, {channel_died, Reason}, State};
@@ -195,7 +204,7 @@ process_request(ProcessFun, State) ->
     process_request(ProcessFun, fun (StateM) -> StateM end, State).
 
 
-process_request(ProcessFun, SuccessFun, State) ->
+process_request(ProcessFun, SuccessFun, State=#proc_state{connection=Conn}) ->
     Res = case catch ProcessFun(State) of
               {'EXIT',
                {{shutdown,
@@ -213,9 +222,9 @@ process_request(ProcessFun, SuccessFun, State) ->
                 none -> ok;
                 _    -> send_frame(Frame, NewState)
             end,
-            {ok, SuccessFun(NewState)};
+            {ok, SuccessFun(NewState), Conn};
         {error, Message, Detail, NewState} ->
-            {ok, send_error(Message, Detail, NewState)};
+            {ok, send_error(Message, Detail, NewState), Conn};
         {stop, normal, NewState} ->
             {stop, normal, SuccessFun(NewState)};
         {stop, R, NewState} ->
@@ -257,6 +266,10 @@ process_connect(Implicit, Frame,
       end,
       State).
 
+creds(_, _, #stomp_configuration{default_login       = DefLogin,
+                                 default_passcode    = DefPasscode,
+                                 force_default_creds = true}) ->
+    {DefLogin, DefPasscode};
 creds(Frame, SSLLoginName,
       #stomp_configuration{default_login    = DefLogin,
                            default_passcode = DefPasscode}) ->
@@ -541,19 +554,22 @@ do_login(Username, Passwd, VirtualHost, Heartbeat, AdapterInfo, Version,
             link(Channel),
             amqp_channel:enable_delivery_flow_control(Channel),
             SessionId = rabbit_guid:string(rabbit_guid:gen_secure(), "session"),
-            {{SendTimeout, ReceiveTimeout}, State1} =
-                ensure_heartbeats(Heartbeat, State),
-            ok("CONNECTED",
-               [{?HEADER_SESSION, SessionId},
-                {?HEADER_HEART_BEAT,
-                 io_lib:format("~B,~B", [SendTimeout, ReceiveTimeout])},
-                {?HEADER_SERVER, server_header()},
-                {?HEADER_VERSION, Version}],
+            {SendTimeout, ReceiveTimeout} = ensure_heartbeats(Heartbeat),
+
+          Headers = [{?HEADER_SESSION, SessionId},
+                     {?HEADER_HEART_BEAT,
+                      io_lib:format("~B,~B", [SendTimeout, ReceiveTimeout])},
+                     {?HEADER_VERSION, Version}],
+          ok("CONNECTED",
+              case rabbit_misc:get_env(rabbitmq_stomp, hide_server_info, false) of
+                true  -> Headers;
+                false -> [{?HEADER_SERVER, server_header()} | Headers]
+              end,
                "",
-               State1#proc_state{session_id = SessionId,
-                            channel    = Channel,
-                            connection = Connection,
-                            version    = Version});
+               State#proc_state{session_id = SessionId,
+                                channel    = Channel,
+                                connection = Connection,
+                                version    = Version});
         {error, {auth_failure, _}} ->
             rabbit_log:warning("STOMP login failed for user ~p~n",
                                [binary_to_list(Username)]),
@@ -976,24 +992,16 @@ perform_transaction_action({Method, Props, BodyFragments}, State) ->
 %% Heartbeat Management
 %%--------------------------------------------------------------------
 
-%TODO heartbeats
-ensure_heartbeats(_, State = #proc_state{start_heartbeat_fun = undefined}) ->
-    {{0, 0}, State};
-ensure_heartbeats(Heartbeats,
-                  State = #proc_state{start_heartbeat_fun = SHF,
-                                      send_fun            = RawSendFun,
-                                      receive_fun         = ReceiveFun}) ->
+ensure_heartbeats(Heartbeats) ->
+
     [CX, CY] = [list_to_integer(X) ||
                    X <- re:split(Heartbeats, ",", [{return, list}])],
 
-    SendFun = fun() -> RawSendFun(sync, <<$\n>>) end,
-
     {SendTimeout, ReceiveTimeout} =
         {millis_to_seconds(CY), millis_to_seconds(CX)},
 
-    SHF(SendTimeout, SendFun, ReceiveTimeout, ReceiveFun),
-
-    {{SendTimeout * 1000 , ReceiveTimeout * 1000}, State}.
+    rabbit_stomp_reader:start_heartbeats(self(), {SendTimeout, ReceiveTimeout}),
+    {SendTimeout * 1000 , ReceiveTimeout * 1000}.
 
 millis_to_seconds(M) when M =< 0   -> 0;
 millis_to_seconds(M) when M < 1000 -> 1;
index 68a2a0b3ac0424897cfba5685378eafef2c63819..d56a1fe01bbfc232c664bf40b9479e2e6ff381db 100644 (file)
 -export([conserve_resources/3]).
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
          code_change/3, terminate/2]).
+-export([start_heartbeats/2]).
 
 -include("rabbit_stomp.hrl").
 -include("rabbit_stomp_frame.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 
 -record(reader_state, {socket, conn_name, parse_state, processor_state, state,
-                       conserve_resources, recv_outstanding,
-                       parent}).
+                       conserve_resources, recv_outstanding, stats_timer,
+                       parent, connection, heartbeat_sup, heartbeat}).
 
 %%----------------------------------------------------------------------------
 
@@ -55,7 +56,7 @@ init([SupHelperPid, Ref, Sock, Configuration]) ->
 
     case rabbit_net:connection_string(Sock, inbound) of
         {ok, ConnStr} ->
-            ProcInitArgs = processor_args(SupHelperPid, Configuration, Sock),
+            ProcInitArgs = processor_args(Configuration, Sock),
             ProcState = rabbit_stomp_processor:initial_state(Configuration,
                                                              ProcInitArgs),
 
@@ -65,14 +66,17 @@ init([SupHelperPid, Ref, Sock, Configuration]) ->
             ParseState = rabbit_stomp_frame:initial_state(),
             register_resource_alarm(),
             gen_server2:enter_loop(?MODULE, [],
-              run_socket(control_throttle(
-                #reader_state{socket             = Sock,
-                              conn_name          = ConnStr,
-                              parse_state        = ParseState,
-                              processor_state    = ProcState,
-                              state              = running,
-                              conserve_resources = false,
-                              recv_outstanding   = false})),
+              rabbit_event:init_stats_timer(
+                run_socket(control_throttle(
+                  #reader_state{socket             = Sock,
+                                conn_name          = ConnStr,
+                                parse_state        = ParseState,
+                                processor_state    = ProcState,
+                                heartbeat_sup      = SupHelperPid,
+                                heartbeat          = {none, none},
+                                state              = running,
+                                conserve_resources = false,
+                                recv_outstanding   = false})), #reader_state.stats_timer),
               {backoff, 1000, 1000, 10000});
         {network_error, Reason} ->
             rabbit_net:fast_close(Sock),
@@ -97,10 +101,10 @@ handle_cast(Msg, State) ->
 
 handle_info({inet_async, _Sock, _Ref, {ok, Data}}, State) ->
     case process_received_bytes(Data, State#reader_state{recv_outstanding = false}) of
-        {ok, NewState} ->
-            {noreply, run_socket(control_throttle(NewState)), hibernate};
-        {stop, Reason, NewState} ->
-            {stop, Reason, NewState}
+      {ok, NewState} ->
+          {noreply, ensure_stats_timer(run_socket(control_throttle(NewState))), hibernate};
+      {stop, Reason, NewState} ->
+          {stop, Reason, NewState}
     end;
 handle_info({inet_async, _Sock, _Ref, {error, closed}}, State) ->
     {stop, normal, State};
@@ -112,6 +116,8 @@ handle_info({inet_reply, _, ok}, State) ->
     {noreply, State, hibernate};
 handle_info({inet_reply, _, Status}, State) ->
     {stop, Status, State};
+handle_info(emit_stats, State) ->
+    {noreply, emit_stats(State), hibernate};
 handle_info({conserve_resources, Conserve}, State) ->
     NewState = State#reader_state{conserve_resources = Conserve},
     {noreply, run_socket(control_throttle(NewState)), hibernate};
@@ -150,18 +156,32 @@ handle_info({Delivery = #'basic.deliver'{},
 handle_info(#'basic.cancel'{consumer_tag = Ctag}, State) ->
     ProcState = processor_state(State),
     case rabbit_stomp_processor:cancel_consumer(Ctag, ProcState) of
-        {ok, NewProcState} ->
-            {noreply, processor_state(NewProcState, State), hibernate};
-        {stop, Reason, NewProcState} ->
-            {stop, Reason, processor_state(NewProcState, State)}
+      {ok, NewProcState, _} ->
+        {noreply, processor_state(NewProcState, State), hibernate};
+      {stop, Reason, NewProcState} ->
+        {stop, Reason, processor_state(NewProcState, State)}
     end;
 
+handle_info({start_heartbeats, {0, 0}}, State) -> 
+    {noreply, State};
+
+handle_info({start_heartbeats, {SendTimeout, ReceiveTimeout}},
+            State = #reader_state{heartbeat_sup = SupPid, socket = Sock}) ->
+
+    SendFun = fun() -> catch rabbit_net:send(Sock, <<$\n>>) end,
+    Pid = self(),
+    ReceiveFun = fun() -> gen_server2:cast(Pid, client_timeout) end,
+    Heartbeat = rabbit_heartbeat:start(SupPid, Sock, SendTimeout,
+                                       SendFun, ReceiveTimeout, ReceiveFun),
+    {noreply, State#reader_state{heartbeat = Heartbeat}};
+
+
 %%----------------------------------------------------------------------------
 handle_info({'EXIT', From, Reason}, State) ->
   ProcState = processor_state(State),
   case rabbit_stomp_processor:handle_exit(From, Reason, ProcState) of
-    {stop, Reason, NewProcState} ->
-        {stop, Reason, processor_state(NewProcState, State)};
+    {stop, NewReason, NewProcState} ->
+        {stop, NewReason, processor_state(NewProcState, State)};
     unknown_exit ->
         {stop, {connection_died, Reason}, State}
   end.
@@ -172,19 +192,19 @@ process_received_bytes([], State) ->
 process_received_bytes(Bytes,
                        State = #reader_state{
                          processor_state = ProcState,
-                         parse_state     = ParseState,
-                         state           = S}) ->
+                         parse_state     = ParseState}) ->
     case rabbit_stomp_frame:parse(Bytes, ParseState) of
         {more, ParseState1} ->
             {ok, State#reader_state{parse_state = ParseState1}};
         {ok, Frame, Rest} ->
             case rabbit_stomp_processor:process_frame(Frame, ProcState) of
-                {ok, NewProcState} ->
+                {ok, NewProcState, Conn} ->
                     PS = rabbit_stomp_frame:initial_state(),
-                    process_received_bytes(Rest, State#reader_state{
+                    NextState = maybe_block(State, Frame),
+                    process_received_bytes(Rest, NextState#reader_state{
                         processor_state = NewProcState,
                         parse_state     = PS,
-                        state           = next_state(S, Frame)});
+                        connection      = Conn});
                 {stop, Reason, NewProcState} ->
                     {stop, Reason,
                      processor_state(NewProcState, State)}
@@ -199,7 +219,7 @@ process_received_bytes(Bytes,
             {stop, normal, State}
     end.
 
-conserve_resources(Pid, _Source, Conserve) ->
+conserve_resources(Pid, _Source, {_, Conserve, _}) ->
     Pid ! {conserve_resources, Conserve},
     ok.
 
@@ -208,18 +228,23 @@ register_resource_alarm() ->
 
 
 control_throttle(State = #reader_state{state              = CS,
-                                       conserve_resources = Mem}) ->
+                                       conserve_resources = Mem,
+                                       heartbeat = Heartbeat}) ->
     case {CS, Mem orelse credit_flow:blocked()} of
         {running,   true} -> State#reader_state{state = blocking};
-        {blocking, false} -> State#reader_state{state = running};
-        {blocked,  false} -> State#reader_state{state = running};
+        {blocking, false} -> rabbit_heartbeat:resume_monitor(Heartbeat),
+                             State#reader_state{state = running};
+        {blocked,  false} -> rabbit_heartbeat:resume_monitor(Heartbeat),
+                             State#reader_state{state = running};
         {_,            _} -> State
     end.
 
-next_state(blocking, #stomp_frame{command = "SEND"}) ->
-    blocked;
-next_state(S, _) ->
-    S.
+maybe_block(State = #reader_state{state = blocking, heartbeat = Heartbeat}, 
+            #stomp_frame{command = "SEND"}) ->
+    rabbit_heartbeat:pause_monitor(Heartbeat),
+    State#reader_state{state = blocked};
+maybe_block(State, _) ->
+    State.
 
 run_socket(State = #reader_state{state = blocked}) ->
     State;
@@ -231,9 +256,10 @@ run_socket(State = #reader_state{socket = Sock}) ->
 
 
 terminate(Reason, State = #reader_state{ processor_state = ProcState }) ->
-    log_reason(Reason, State),
-    rabbit_stomp_processor:flush_and_die(ProcState),
-    ok.
+  maybe_emit_stats(State),
+  log_reason(Reason, State),
+  rabbit_stomp_processor:flush_and_die(ProcState),
+  ok.
 
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
@@ -279,12 +305,17 @@ log_reason({shutdown, client_heartbeat_timeout},
                        "on connection ~s, closing it~n", [AdapterName]);
 
 log_reason(normal, #reader_state{ conn_name  = ConnName}) ->
-    log(info, "closing STOMP connection ~p (~s)~n", [self(), ConnName]).
+    log(info, "closing STOMP connection ~p (~s)~n", [self(), ConnName]);
+
+log_reason(Reason, #reader_state{ processor_state = ProcState }) ->
+    AdapterName = rabbit_stomp_processor:adapter_name(ProcState),
+    rabbit_log:warning("STOMP connection ~s terminated"
+                       " with reason ~p, closing it~n", [AdapterName, Reason]).
 
 
 %%----------------------------------------------------------------------------
 
-processor_args(SupPid, Configuration, Sock) ->
+processor_args(Configuration, Sock) ->
     SendFun = fun (sync, IoData) ->
                       %% no messages emitted
                       catch rabbit_net:send(Sock, IoData);
@@ -296,17 +327,8 @@ processor_args(SupPid, Configuration, Sock) ->
                       %% bug 21365.
                       catch rabbit_net:port_command(Sock, IoData)
               end,
-
-    Pid = self(),
-    ReceiveFun = fun() -> gen_server2:cast(Pid, client_timeout) end,
-
-    StartHeartbeatFun =
-        fun (SendTimeout, SendFn, ReceiveTimeout, ReceiveFn) ->
-                rabbit_heartbeat:start(SupPid, Sock, SendTimeout,
-                                       SendFn, ReceiveTimeout, ReceiveFn)
-        end,
     {ok, {PeerAddr, _PeerPort}} = rabbit_net:sockname(Sock),
-    {SendFun, ReceiveFun, adapter_info(Sock), StartHeartbeatFun,
+    {SendFun, adapter_info(Sock), 
      ssl_login_name(Sock, Configuration), PeerAddr}.
 
 adapter_info(Sock) ->
@@ -327,6 +349,29 @@ ssl_login_name(Sock, #stomp_configuration{ssl_cert_login = true}) ->
 
 %%----------------------------------------------------------------------------
 
+start_heartbeats(_,   {0,0}    ) -> ok;
+start_heartbeats(Pid, Heartbeat) -> Pid ! {start_heartbeats, Heartbeat}.
+
+maybe_emit_stats(State) ->
+    rabbit_event:if_enabled(State, #reader_state.stats_timer,
+                            fun() -> emit_stats(State) end).
+
+emit_stats(State=#reader_state{socket = Sock, state = ConnState, connection = Conn}) ->
+    SockInfos = case rabbit_net:getstat(Sock,
+            [recv_oct, recv_cnt, send_oct, send_cnt, send_pend]) of
+        {ok,    SI} -> SI;
+        {error,  _} -> []
+    end,
+    Infos = [{pid, Conn}, {state, ConnState} | SockInfos],
+    rabbit_event:notify(connection_stats, Infos),
+    State1 = rabbit_event:reset_stats_timer(State, #reader_state.stats_timer),
+    ensure_stats_timer(State1).
+
+ensure_stats_timer(State = #reader_state{}) ->
+    rabbit_event:ensure_stats_timer(State, #reader_state.stats_timer, emit_stats).
+
+%%----------------------------------------------------------------------------
+
 
 processor_state(#reader_state{ processor_state = ProcState }) -> ProcState.
 processor_state(ProcState, #reader_state{} = State) ->
index 83e69d4216017a34d4408b81cb95ac918675f0c1..122b36a6f0fec664c0f07fa57fb0ba0c234f53cd 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_stomp,
- [{description, "Embedded Rabbit Stomp Adapter"},
-  {vsn, "3.6.1"},
+ [{description, "RabbitMQ STOMP plugin"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_stomp, []}},
@@ -17,5 +17,7 @@
          {tcp_listen_options, [{backlog,   128},
                                {nodelay,   true}]},
         %% see rabbitmq/rabbitmq-stomp#39
-        {trailing_lf, true}]},
+        {trailing_lf, true},
+        %% see rabbitmq/rabbitmq-stomp#57
+        {hide_server_info, false}]},
   {applications, [kernel, stdlib, rabbit, amqp_client]}]}.
similarity index 61%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/rabbit_stomp_amqqueue_test.erl
rename to rabbitmq-server/deps/rabbitmq_stomp/test/amqqueue_SUITE.erl
index 637ca61ec8990746ad7fd7c684830c84db709176..300e86d82498a490abe3505b4aac8a52a8799c0a 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_stomp_amqqueue_test).
--export([all_tests/0]).
+-module(amqqueue_SUITE).
+
 -compile(export_all).
 
+-include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 -include("rabbit_stomp.hrl").
 -define(QUEUE, <<"TestQueue">>).
 -define(DESTINATION, "/amq/queue/TestQueue").
 
-all_tests() ->
-    [[ok = run_test(TestFun, Version)
-      || TestFun <- [fun test_subscribe_error/3,
-                     fun test_subscribe/3,
-                     fun test_unsubscribe_ack/3,
-                     fun test_subscribe_ack/3,
-                     fun test_send/3,
-                     fun test_delete_queue_subscribe/3,
-                     fun test_temp_destination_queue/3,
-                     fun test_temp_destination_in_send/3,
-                     fun test_blank_destination_in_send/3]]
-     || Version <- ?SUPPORTED_VERSIONS],
-    ok.
-
-run_test(TestFun, Version) ->
-    {ok, Connection} = amqp_connection:start(#amqp_params_direct{}),
+all() ->
+    [{group, list_to_atom("version_" ++ V)} || V <- ?SUPPORTED_VERSIONS].
+
+groups() ->
+    Tests = [
+        publish_no_dest_error,
+        publish_unauthorized_error,
+        subscribe_error,
+        subscribe,
+        unsubscribe_ack,
+        subscribe_ack,
+        send,
+        delete_queue_subscribe,
+        temp_destination_queue,
+        temp_destination_in_send,
+        blank_destination_in_send
+    ],
+
+    [{list_to_atom("version_" ++ V), [sequence], Tests}
+     || V <- ?SUPPORTED_VERSIONS].
+
+init_per_suite(Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config,
+                                           [{rmq_nodename_suffix, ?MODULE}]),
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+                                      rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(Group, Config) ->
+    Version = string:sub_string(atom_to_list(Group), 9),
+    rabbit_ct_helpers:set_config(Config, [{version, Version}]).
+
+end_per_group(_Group, Config) -> Config.
+
+init_per_testcase(TestCase, Config) ->
+    Version = ?config(version, Config),
+    StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp),
+    {ok, Connection} = amqp_connection:start(#amqp_params_direct{
+        node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename)
+    }),
     {ok, Channel} = amqp_connection:open_channel(Connection),
-    {ok, Client} = rabbit_stomp_client:connect(Version),
-
-    Result = (catch TestFun(Channel, Client, Version)),
-
+    {ok, Client} = rabbit_stomp_client:connect(Version, StompPort),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {amqp_connection, Connection},
+        {amqp_channel, Channel},
+        {stomp_client, Client}
+      ]),
+    init_per_testcase0(TestCase, Config1).
+
+end_per_testcase(TestCase, Config) ->
+    Connection = ?config(amqp_connection, Config),
+    Channel = ?config(amqp_channel, Config),
+    Client = ?config(stomp_client, Config),
     rabbit_stomp_client:disconnect(Client),
     amqp_channel:close(Channel),
     amqp_connection:close(Connection),
-    Result.
+    end_per_testcase0(TestCase, Config).
+
+init_per_testcase0(publish_unauthorized_error, Config) ->
+    Channel = ?config(amqp_channel, Config),
+    #'queue.declare_ok'{} =
+        amqp_channel:call(Channel, #'queue.declare'{queue       = <<"RestrictedQueue">>,
+                                                    auto_delete = true}),
+
+    rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, add_user, [<<"user">>, <<"pass">>]),
+    rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, set_permissions, [
+        <<"user">>, <<"/">>, <<"nothing">>, <<"nothing">>, <<"nothing">>]),
+    Version = ?config(version, Config),
+    StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp),
+    {ok, ClientFoo} = rabbit_stomp_client:connect(Version, "user", "pass", StompPort),
+    rabbit_ct_helpers:set_config(Config, [{client_foo, ClientFoo}]);
+init_per_testcase0(_, Config) ->
+    Config.
+
+end_per_testcase0(publish_unauthorized_error, Config) ->
+    ClientFoo = ?config(client_foo, Config),
+    rabbit_stomp_client:disconnect(ClientFoo),
+    rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_auth_backend_internal, delete_user, [<<"user">>]),
+    Config;
+end_per_testcase0(_, Config) ->
+    Config.
+
+publish_no_dest_error(Config) ->
+    Client = ?config(stomp_client, Config),
+    rabbit_stomp_client:send(
+      Client, "SEND", [{"destination", "/exchange/non-existent"}], ["hello"]),
+    {ok, _Client1, Hdrs, _} = stomp_receive(Client, "ERROR"),
+    "not_found" = proplists:get_value("message", Hdrs),
+    ok.
+
+publish_unauthorized_error(Config) ->
+    ClientFoo = ?config(client_foo, Config),
+    rabbit_stomp_client:send(
+      ClientFoo, "SEND", [{"destination", "/amq/queue/RestrictedQueue"}], ["hello"]),
+    {ok, _Client1, Hdrs, _} = stomp_receive(ClientFoo, "ERROR"),
+    "access_refused" = proplists:get_value("message", Hdrs),
+    ok.
 
-test_subscribe_error(_Channel, Client, _Version) ->
+subscribe_error(Config) ->
+    Client = ?config(stomp_client, Config),
     %% SUBSCRIBE to missing queue
     rabbit_stomp_client:send(
       Client, "SUBSCRIBE", [{"destination", ?DESTINATION}]),
@@ -61,7 +138,9 @@ test_subscribe_error(_Channel, Client, _Version) ->
     "not_found" = proplists:get_value("message", Hdrs),
     ok.
 
-test_subscribe(Channel, Client, _Version) ->
+subscribe(Config) ->
+    Channel = ?config(amqp_channel, Config),
+    Client = ?config(stomp_client, Config),
     #'queue.declare_ok'{} =
         amqp_channel:call(Channel, #'queue.declare'{queue       = ?QUEUE,
                                                     auto_delete = true}),
@@ -80,7 +159,10 @@ test_subscribe(Channel, Client, _Version) ->
     {ok, _Client2, _, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"),
     ok.
 
-test_unsubscribe_ack(Channel, Client, Version) ->
+unsubscribe_ack(Config) ->
+    Channel = ?config(amqp_channel, Config),
+    Client = ?config(stomp_client, Config),
+    Version = ?config(version, Config),
     #'queue.declare_ok'{} =
         amqp_channel:call(Channel, #'queue.declare'{queue       = ?QUEUE,
                                                     auto_delete = true}),
@@ -115,7 +197,10 @@ test_unsubscribe_ack(Channel, Client, Version) ->
                  proplists:get_value("message", Hdrs2)),
     ok.
 
-test_subscribe_ack(Channel, Client, Version) ->
+subscribe_ack(Config) ->
+    Channel = ?config(amqp_channel, Config),
+    Client = ?config(stomp_client, Config),
+    Version = ?config(version, Config),
     #'queue.declare_ok'{} =
         amqp_channel:call(Channel, #'queue.declare'{queue       = ?QUEUE,
                                                     auto_delete = true}),
@@ -145,7 +230,9 @@ test_subscribe_ack(Channel, Client, Version) ->
         amqp_channel:call(Channel, #'basic.get'{queue = ?QUEUE}),
     ok.
 
-test_send(Channel, Client, _Version) ->
+send(Config) ->
+    Channel = ?config(amqp_channel, Config),
+    Client = ?config(stomp_client, Config),
     #'queue.declare_ok'{} =
         amqp_channel:call(Channel, #'queue.declare'{queue       = ?QUEUE,
                                                     auto_delete = true}),
@@ -162,7 +249,9 @@ test_send(Channel, Client, _Version) ->
     {ok, _Client2, _, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"),
     ok.
 
-test_delete_queue_subscribe(Channel, Client, _Version) ->
+delete_queue_subscribe(Config) ->
+    Channel = ?config(amqp_channel, Config),
+    Client = ?config(stomp_client, Config),
     #'queue.declare_ok'{} =
         amqp_channel:call(Channel, #'queue.declare'{queue       = ?QUEUE,
                                                     auto_delete = true}),
@@ -183,7 +272,9 @@ test_delete_queue_subscribe(Channel, Client, _Version) ->
     % server closes connection
     ok.
 
-test_temp_destination_queue(Channel, Client, _Version) ->
+temp_destination_queue(Config) ->
+    Channel = ?config(amqp_channel, Config),
+    Client = ?config(stomp_client, Config),
     #'queue.declare_ok'{} =
         amqp_channel:call(Channel, #'queue.declare'{queue       = ?QUEUE,
                                                     auto_delete = true}),
@@ -192,9 +283,9 @@ test_temp_destination_queue(Channel, Client, _Version) ->
                                               ["ping"]),
     amqp_channel:call(Channel,#'basic.consume'{queue  = ?QUEUE, no_ack = true}),
     receive #'basic.consume_ok'{consumer_tag = _Tag} -> ok end,
-    receive {#'basic.deliver'{delivery_tag = _DTag},
+    ReplyTo = receive {#'basic.deliver'{delivery_tag = _DTag},
              #'amqp_msg'{payload = <<"ping">>,
-                         props   = #'P_basic'{reply_to = ReplyTo}}} -> ok
+                         props   = #'P_basic'{reply_to = RT}}} -> RT
     end,
     ok = amqp_channel:call(Channel,
                            #'basic.publish'{routing_key = ReplyTo},
@@ -202,14 +293,16 @@ test_temp_destination_queue(Channel, Client, _Version) ->
     {ok, _Client1, _, [<<"pong">>]} = stomp_receive(Client, "MESSAGE"),
     ok.
 
-test_temp_destination_in_send(_Channel, Client, _Version) ->
+temp_destination_in_send(Config) ->
+    Client = ?config(stomp_client, Config),
     rabbit_stomp_client:send( Client, "SEND", [{"destination", "/temp-queue/foo"}],
                                               ["poing"]),
     {ok, _Client1, Hdrs, _} = stomp_receive(Client, "ERROR"),
     "Invalid destination" = proplists:get_value("message", Hdrs),
     ok.
 
-test_blank_destination_in_send(_Channel, Client, _Version) ->
+blank_destination_in_send(Config) ->
+    Client = ?config(stomp_client, Config),
     rabbit_stomp_client:send( Client, "SEND", [{"destination", ""}],
                                               ["poing"]),
     {ok, _Client1, Hdrs, _} = stomp_receive(Client, "ERROR"),
similarity index 55%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/rabbit_stomp_test.erl
rename to rabbitmq-server/deps/rabbitmq_stomp/test/connections_SUITE.erl
index f85573c08ce1b9225e2095c99e2ec717da4785cb..1971c546171f015c5934d4a27d6a3e87466a0aa5 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_stomp_test).
--export([all_tests/0]).
+-module(connections_SUITE).
+-compile(export_all).
+
 -import(rabbit_misc, [pget/2]).
 
+-include_lib("common_test/include/ct.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 -include("rabbit_stomp_frame.hrl").
 -define(DESTINATION, "/queue/bulk-test").
 
-all_tests() ->
-    test_messages_not_dropped_on_disconnect(),
-    test_direct_client_connections_are_not_leaked(),
-    ok.
+all() ->
+    [
+        messages_not_dropped_on_disconnect,
+        direct_client_connections_are_not_leaked
+    ].
+
+init_per_suite(Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config,
+                                           [{rmq_nodename_suffix, ?MODULE}]),
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+                                      rabbit_ct_broker_helpers:setup_steps()).
+
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
 
 -define(GARBAGE, <<"bdaf63dda9d78b075c748b740e7c3510ad203b07\nbdaf63dd">>).
 
-count_connections() ->
+count_connections(Config) ->
+    StompPort = get_stomp_port(Config),
     %% The default port is 61613 but it's in the middle of the ephemeral
     %% ports range on many operating systems. Therefore, there is a
     %% chance this port is already in use. Let's use a port close to the
@@ -40,23 +55,30 @@ count_connections() ->
         %% listener doesn't exist. Thus this try/catch. This is the case
         %% with Linux where net.ipv6.bindv6only is disabled (default in
         %% most cases).
-        ranch_server:count_connections({acceptor, {0,0,0,0}, 5673})
+        rpc_count_connections(Config, {acceptor, {0,0,0,0}, StompPort})
     catch
-        _:badarg -> 0
+        _:{badarg, _} -> 0;
+        _:Other -> exit({foo, Other})
     end,
     IPv6Count = try
         %% Count IPv6 connections. We also use a try/catch block in case
         %% the host is not configured for IPv6.
-        ranch_server:count_connections({acceptor, {0,0,0,0,0,0,0,0}, 5673})
+        rpc_count_connections(Config, {acceptor, {0,0,0,0,0,0,0,0}, StompPort})
     catch
-        _:badarg -> 0
+        _:{badarg, _} -> 0;
+        _:Other1 -> exit({foo, Other1})
     end,
     IPv4Count + IPv6Count.
 
-test_direct_client_connections_are_not_leaked() ->
-    N = count_connections(),
+rpc_count_connections(Config, ConnSpec) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0,
+                                 ranch_server, count_connections, [ConnSpec]).
+
+direct_client_connections_are_not_leaked(Config) ->
+    StompPort = get_stomp_port(Config),
+    N = count_connections(Config),
     lists:foreach(fun (_) ->
-                          {ok, Client = {Socket, _}} = rabbit_stomp_client:connect(),
+                          {ok, Client = {Socket, _}} = rabbit_stomp_client:connect(StompPort),
                           %% send garbage which trips up the parser
                           gen_tcp:send(Socket, ?GARBAGE),
                           rabbit_stomp_client:send(
@@ -64,23 +86,27 @@ test_direct_client_connections_are_not_leaked() ->
                   end,
                   lists:seq(1, 100)),
     timer:sleep(5000),
-    N = count_connections(),
+    N = count_connections(Config),
     ok.
 
-test_messages_not_dropped_on_disconnect() ->
-    N = count_connections(),
-    {ok, Client} = rabbit_stomp_client:connect(),
+messages_not_dropped_on_disconnect(Config) ->
+    StompPort = get_stomp_port(Config),
+    N = count_connections(Config),
+    {ok, Client} = rabbit_stomp_client:connect(StompPort),
     N1 = N + 1,
-    N1 = count_connections(),
+    N1 = count_connections(Config),
     [rabbit_stomp_client:send(
        Client, "SEND", [{"destination", ?DESTINATION}],
        [integer_to_list(Count)]) || Count <- lists:seq(1, 1000)],
     rabbit_stomp_client:disconnect(Client),
     QName = rabbit_misc:r(<<"/">>, queue, <<"bulk-test">>),
     timer:sleep(3000),
-    N = count_connections(),
-    rabbit_amqqueue:with(
-      QName, fun(Q) ->
-                     1000 = pget(messages, rabbit_amqqueue:info(Q, [messages]))
-             end),
+    N = count_connections(Config),
+    {ok, Q} = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, lookup, [QName]),
+    Messages = rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_amqqueue, info, [Q, [messages]]),
+    1000 = pget(messages, Messages),
     ok.
+
+get_stomp_port(Config) ->
+    rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp).
+
similarity index 78%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/rabbit_stomp_test_frame.erl
rename to rabbitmq-server/deps/rabbitmq_stomp/test/frame_SUITE.erl
index 6f427489e7b0ea8661427f5dce4168fb7100b962..006d3b64e09c44020728503e4f7bf626266f75c1 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_stomp_test_frame).
+-module(frame_SUITE).
 
+-include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 -include("rabbit_stomp_frame.hrl").
 -include("rabbit_stomp_headers.hrl").
-
-parse_simple_frame_test() ->
+-compile(export_all).
+
+all() ->
+    [
+    parse_simple_frame,
+    parse_simple_frame_crlf,
+    parse_command_only,
+    parse_ignore_empty_frames,
+    parse_heartbeat_interframe,
+    parse_crlf_interframe,
+    parse_carriage_return_not_ignored_interframe,
+    parse_carriage_return_mid_command,
+    parse_carriage_return_end_command,
+    parse_resume_mid_command,
+    parse_resume_mid_header_key,
+    parse_resume_mid_header_val,
+    parse_resume_mid_body,
+    parse_no_header_stripping,
+    parse_multiple_headers,
+    header_no_colon,
+    no_nested_escapes,
+    header_name_with_cr,
+    header_value_with_cr,
+    header_value_with_colon,
+    headers_escaping_roundtrip,
+    headers_escaping_roundtrip_without_trailing_lf
+    ].
+
+parse_simple_frame(_) ->
     parse_simple_frame_gen("\n").
 
-parse_simple_frame_crlf_test() ->
+parse_simple_frame_crlf(_) ->
     parse_simple_frame_gen("\r\n").
 
 parse_simple_frame_gen(Term) ->
@@ -40,34 +68,34 @@ parse_simple_frame_gen(Term) ->
     #stomp_frame{body_iolist = Body} = Frame,
     ?assertEqual(<<"Body Content">>, iolist_to_binary(Body)).
 
-parse_command_only_test() ->
+parse_command_only(_) ->
     {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("COMMAND\n\n\0").
 
-parse_ignore_empty_frames_test() ->
+parse_ignore_empty_frames(_) ->
     {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("\0\0COMMAND\n\n\0").
 
-parse_heartbeat_interframe_test() ->
+parse_heartbeat_interframe(_) ->
     {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("\nCOMMAND\n\n\0").
 
-parse_crlf_interframe_test() ->
+parse_crlf_interframe(_) ->
     {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse("\r\nCOMMAND\n\n\0").
 
-parse_carriage_return_not_ignored_interframe_test() ->
+parse_carriage_return_not_ignored_interframe(_) ->
     {error, {unexpected_chars_between_frames, "\rC"}} = parse("\rCOMMAND\n\n\0").
 
-parse_carriage_return_mid_command_test() ->
+parse_carriage_return_mid_command(_) ->
     {error, {unexpected_chars_in_command, "\rA"}} = parse("COMM\rAND\n\n\0").
 
-parse_carriage_return_end_command_test() ->
+parse_carriage_return_end_command(_) ->
     {error, {unexpected_chars_in_command, "\r\r"}} = parse("COMMAND\r\r\n\n\0").
 
-parse_resume_mid_command_test() ->
+parse_resume_mid_command(_) ->
     First = "COMM",
     Second = "AND\n\n\0",
     {more, Resume} = parse(First),
     {ok, #stomp_frame{command = "COMMAND"}, _Rest} = parse(Second, Resume).
 
-parse_resume_mid_header_key_test() ->
+parse_resume_mid_header_key(_) ->
     First = "COMMAND\nheade",
     Second = "r1:value1\n\n\0",
     {more, Resume} = parse(First),
@@ -76,7 +104,7 @@ parse_resume_mid_header_key_test() ->
     ?assertEqual({ok, "value1"},
                  rabbit_stomp_frame:header(Frame, "header1")).
 
-parse_resume_mid_header_val_test() ->
+parse_resume_mid_header_val(_) ->
     First = "COMMAND\nheader1:val",
     Second = "ue1\n\n\0",
     {more, Resume} = parse(First),
@@ -85,7 +113,7 @@ parse_resume_mid_header_val_test() ->
     ?assertEqual({ok, "value1"},
                  rabbit_stomp_frame:header(Frame, "header1")).
 
-parse_resume_mid_body_test() ->
+parse_resume_mid_body(_) ->
     First = "COMMAND\n\nABC",
     Second = "DEF\0",
     {more, Resume} = parse(First),
@@ -93,19 +121,19 @@ parse_resume_mid_body_test() ->
          parse(Second, Resume),
     ?assertEqual([<<"ABC">>, <<"DEF">>], Body).
 
-parse_no_header_stripping_test() ->
+parse_no_header_stripping(_) ->
     Content = "COMMAND\nheader: foo \n\n\0",
     {ok, Frame, _} = parse(Content),
     {ok, Val} = rabbit_stomp_frame:header(Frame, "header"),
     ?assertEqual(" foo ", Val).
 
-parse_multiple_headers_test() ->
+parse_multiple_headers(_) ->
     Content = "COMMAND\nheader:correct\nheader:incorrect\n\n\0",
     {ok, Frame, _} = parse(Content),
     {ok, Val} = rabbit_stomp_frame:header(Frame, "header"),
     ?assertEqual("correct", Val).
 
-header_no_colon_test() ->
+header_no_colon(_) ->
     Content = "COMMAND\n"
               "hdr1:val1\n"
               "hdrerror\n"
@@ -113,7 +141,7 @@ header_no_colon_test() ->
               "\n\0",
     ?assertEqual(parse(Content), {error, {header_no_value, "hdrerror"}}).
 
-no_nested_escapes_test() ->
+no_nested_escapes(_) ->
     Content = "COM\\\\rAND\n"      % no escapes
               "hdr\\\\rname:"      % one escape
               "hdr\\\\rval\n\n\0", % one escape
@@ -123,15 +151,15 @@ no_nested_escapes_test() ->
                               headers = [{"hdr\\rname", "hdr\\rval"}],
                               body_iolist = []}).
 
-header_name_with_cr_test() ->
+header_name_with_cr(_) ->
     Content = "COMMAND\nhead\rer:val\n\n\0",
     {error, {unexpected_chars_in_header, "\re"}} = parse(Content).
 
-header_value_with_cr_test() ->
+header_value_with_cr(_) ->
     Content = "COMMAND\nheader:val\rue\n\n\0",
     {error, {unexpected_chars_in_header, "\ru"}} = parse(Content).
 
-header_value_with_colon_test() ->
+header_value_with_colon(_) ->
     Content = "COMMAND\nheader:val:ue\n\n\0",
     {ok, Frame, _} = parse(Content),
     ?assertEqual(Frame,
@@ -146,10 +174,10 @@ test_frame_serialization(Expected, TrailingLF) ->
     Serialized = lists:flatten(rabbit_stomp_frame:serialize(Frame, TrailingLF)),
     ?assertEqual(Expected, rabbit_misc:format("~s", [Serialized])).
 
-headers_escaping_roundtrip_test() ->
+headers_escaping_roundtrip(_) ->
     test_frame_serialization("COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0\n", true).
 
-headers_escaping_roundtrip_without_trailing_lf_test() ->
+headers_escaping_roundtrip_without_trailing_lf(_) ->
     test_frame_serialization("COMMAND\nhead\\r\\c\\ner:\\c\\n\\r\\\\\n\n\0", false).
 
 parse(Content) ->
@@ -161,9 +189,6 @@ parse_complete(Content) ->
     {ok, Frame = #stomp_frame{command = Command}, State} = parse(Content),
     {Command, Frame, State}.
 
-frame_string(Command, Headers, BodyContent) ->
-    frame_string(Command, Headers, BodyContent, "\n").
-
 frame_string(Command, Headers, BodyContent, Term) ->
     HeaderString =
         lists:flatten([Key ++ ":" ++ Value ++ Term || {Key, Value} <- Headers]),
diff --git a/rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE.erl b/rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE.erl
new file mode 100644 (file)
index 0000000..b6098a3
--- /dev/null
@@ -0,0 +1,63 @@
+%% Common Test suite that drives the Python-based STOMP system tests
+%% (stomp.py / pika clients) against a freshly started broker node.
+-module(python_SUITE).
+-compile(export_all).
+-include_lib("common_test/include/ct.hrl").
+
+%% Each testcase below shells out to one Python test script; see run/2.
+all() ->
+    [
+    common,
+    ssl,
+    connect_options
+    ].
+
+%% Start a dedicated broker node for every testcase and build the two
+%% vendored Python client libraries (pika, stomppy) from the suite's
+%% data dir so the test scripts can import them.
+%% NOTE(review): a full broker setup/teardown per testcase is expensive;
+%% presumably intentional for isolation — confirm against CT group usage.
+init_per_testcase(_, Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config,
+                                           [{rmq_certspwd, "bunnychow"},
+                                            {rmq_nodename_suffix, ?MODULE}]),
+    rabbit_ct_helpers:log_environment(),
+    Config2 = rabbit_ct_helpers:run_setup_steps(
+        Config1,
+        rabbit_ct_broker_helpers:setup_steps()),
+    DataDir = ?config(data_dir, Config2),
+    PikaDir = filename:join([DataDir, "deps", "pika"]),
+    StomppyDir = filename:join([DataDir, "deps", "stomppy"]),
+    %% make/3 runs `make` in the given directory via the CT helpers.
+    rabbit_ct_helpers:make(Config2, PikaDir, []),
+    rabbit_ct_helpers:make(Config2, StomppyDir, []),
+    Config2.
+
+%% Stop and clean up the broker node started in init_per_testcase/2.
+end_per_testcase(_, Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+
+%% Plain-TCP STOMP tests.
+common(Config) ->
+    run(Config, filename:join("src", "test.py")).
+
+%% CONNECT-frame option tests (implicit connect, default user).
+connect_options(Config) ->
+    run(Config, filename:join("src", "test_connect_options.py")).
+
+%% TLS STOMP listener tests.
+ssl(Config) ->
+    run(Config, filename:join("src", "test_ssl.py")).
+
+%% Execute a single Python test script against node 0, passing broker
+%% connection details (ports, node name, cert dir) via the environment.
+%% Fails the testcase if the script exits non-zero.
+run(Config, Test) ->
+    DataDir = ?config(data_dir, Config),
+    CertsDir = rabbit_ct_helpers:get_config(Config, rmq_certsdir),
+    StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp),
+    StompPortTls = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp_tls),
+    AmqpPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+    NodeName = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    %% os:getenv/1 returns the atom 'false' when the variable is unset;
+    %% appending 'false' with "++" would crash with badarg, so fall back
+    %% to the empty string in that case.
+    PythonPath = case os:getenv("PYTHONPATH") of
+                     false -> "";
+                     Path  -> Path
+                 end,
+    os:putenv("PYTHONPATH", filename:join([DataDir, "deps", "pika","pika"])
+                            ++":"++
+                            filename:join([DataDir, "deps", "stomppy", "stomppy"])
+                            ++ ":" ++
+                            PythonPath),
+    os:putenv("AMQP_PORT", integer_to_list(AmqpPort)),
+    os:putenv("STOMP_PORT", integer_to_list(StompPort)),
+    os:putenv("STOMP_PORT_TLS", integer_to_list(StompPortTls)),
+    os:putenv("RABBITMQ_NODENAME", atom_to_list(NodeName)),
+    os:putenv("SSL_CERTS_PATH", CertsDir),
+    {ok, _} = rabbit_ct_helpers:exec([filename:join(DataDir, Test)]).
+
+
+%% Directory containing this module's source file.
+%% NOTE(review): appears unused within this module; also
+%% filename:find_src/1 was deprecated/removed in later OTP releases —
+%% fine for the OTP this targets, but worth confirming before reuse.
+cur_dir() ->
+    {Src, _} = filename:find_src(?MODULE),
+    filename:dirname(Src).
similarity index 99%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/ack.py
rename to rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/ack.py
index 9befdad08499093d0aafcd953e6c65d59682839f..6132e49846321436d544189102016731d3c9dc02 100644 (file)
@@ -2,6 +2,7 @@ import unittest
 import stomp
 import base
 import time
+import os
 
 class TestAck(base.BaseTest):
 
@@ -220,7 +221,7 @@ class TestAck(base.BaseTest):
 class TestAck11(TestAck):
 
    def create_connection_obj(self, version='1.1', vhost='/', heartbeats=(0, 0)):
-       conn = stomp.StompConnection11(host_and_ports=[('localhost', 5673)],
+       conn = stomp.StompConnection11(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))],
                                       vhost=vhost,
                                       heartbeats=heartbeats)
        self.ack_id_source_header = 'message-id'
@@ -233,7 +234,7 @@ class TestAck11(TestAck):
 class TestAck12(TestAck):
 
    def create_connection_obj(self, version='1.2', vhost='/', heartbeats=(0, 0)):
-       conn = stomp.StompConnection12(host_and_ports=[('localhost', 5673)],
+       conn = stomp.StompConnection12(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))],
                                       vhost=vhost,
                                       heartbeats=heartbeats)
        self.ack_id_source_header = 'ack'
similarity index 98%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/base.py
rename to rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/base.py
index 20f99d9b45c1c37c2334724353b52e1eb60536aa..134f5c4fbc752fee9e9ca0218a1f5504a3f86cec 100644 (file)
@@ -2,29 +2,30 @@ import unittest
 import stomp
 import sys
 import threading
+import os
 
 
 class BaseTest(unittest.TestCase):
 
    def create_connection_obj(self, version='1.0', vhost='/', heartbeats=(0, 0)):
        if version == '1.0':
-           conn = stomp.StompConnection10(host_and_ports=[('localhost', 5673)])
+           conn = stomp.StompConnection10(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))])
            self.ack_id_source_header = 'message-id'
            self.ack_id_header = 'message-id'
        elif version == '1.1':
-           conn = stomp.StompConnection11(host_and_ports=[('localhost', 5673)],
+           conn = stomp.StompConnection11(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))],
                                           vhost=vhost,
                                           heartbeats=heartbeats)
            self.ack_id_source_header = 'message-id'
            self.ack_id_header = 'message-id'
        elif version == '1.2':
-           conn = stomp.StompConnection12(host_and_ports=[('localhost', 5673)],
+           conn = stomp.StompConnection12(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))],
                                           vhost=vhost,
                                           heartbeats=heartbeats)
            self.ack_id_source_header = 'ack'
            self.ack_id_header = 'id'
        else:
-           conn = stomp.StompConnection12(host_and_ports=[('localhost', 5673)],
+           conn = stomp.StompConnection12(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))],
                                           vhost=vhost,
                                           heartbeats=heartbeats)
            conn.version = version
similarity index 95%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/connect_options.py
rename to rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/connect_options.py
index b8f558c5eb720ea159bce9cd2900f23e879dbdcf..d53457f798a4ed3eb5409db85505fb18250332dd 100644 (file)
@@ -2,6 +2,7 @@ import unittest
 import stomp
 import base
 import test_util
+import os
 
 class TestConnectOptions(base.BaseTest):
 
@@ -10,7 +11,7 @@ class TestConnectOptions(base.BaseTest):
         self.conn.disconnect()
         test_util.enable_implicit_connect()
         listener = base.WaitableListener()
-        new_conn = stomp.Connection(host_and_ports=[('localhost', 5673)])
+        new_conn = stomp.Connection(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))])
         new_conn.set_listener('', listener)
 
         new_conn.start() # not going to issue connect
@@ -31,7 +32,7 @@ class TestConnectOptions(base.BaseTest):
         self.conn.disconnect()
         test_util.enable_default_user()
         listener = base.WaitableListener()
-        new_conn = stomp.Connection(host_and_ports=[('localhost', 5673)])
+        new_conn = stomp.Connection(host_and_ports=[('localhost', int(os.environ["STOMP_PORT"]))])
         new_conn.set_listener('', listener)
         new_conn.start()
         new_conn.connect()
similarity index 99%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/parsing.py
rename to rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/parsing.py
index d14fb86349c23bbce219f1d6a02503abc6bcdf11..bd37977608d2443986065112294f9a1089015ef5 100644 (file)
@@ -8,6 +8,7 @@ import socket
 import functools
 import time
 import sys
+import os
 
 def connect(cnames):
     ''' Decorator that creates stomp connections and issues CONNECT '''
@@ -17,9 +18,9 @@ def connect(cnames):
         '\n'
         '\n\0')
     resp = ('CONNECTED\n'
+            'server:RabbitMQ/(.*)\n'
             'session:(.*)\n'
             'heart-beat:0,0\n'
-            'server:RabbitMQ/(.*)\n'
             'version:1.0\n'
             '\n\x00')
     def w(m):
@@ -51,7 +52,7 @@ class TestParsing(unittest.TestCase):
     # ports range on many operating systems. Therefore, there is a
     # chance this port is already in use. Let's use a port close to the
     # AMQP default port.
-    port=5673
+    port=int(os.environ["STOMP_PORT"])
 
 
     def match(self, pattern, data):
similarity index 95%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/queue_properties.py
rename to rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/queue_properties.py
index cc85487a6a8f135d56522077b94b8b4cbfea541f..f18072ec78f89ced24f0156b196e00c289d5fcb3 100644 (file)
@@ -3,6 +3,7 @@ import stomp
 import pika
 import base
 import time
+import os
 
 class TestQueueProperties(base.BaseTest):
 
@@ -25,7 +26,7 @@ class TestQueueProperties(base.BaseTest):
         # if the properties are the same we should
         # not get any error
         connection = pika.BlockingConnection(pika.ConnectionParameters(
-                    host='localhost'))
+                    host='localhost', port=int(os.environ["AMQP_PORT"])))
         channel = connection.channel()
         channel.queue_declare(queue='queue-properties-subscribe-test',
                               durable=True,
@@ -61,7 +62,7 @@ class TestQueueProperties(base.BaseTest):
         # if the properties are the same we should
         # not get any error
         connection = pika.BlockingConnection(pika.ConnectionParameters(
-                    host='localhost'))
+                    host='localhost', port=int(os.environ["AMQP_PORT"])))
         channel = connection.channel()
         channel.queue_declare(queue='queue-properties-send-test',
                               durable=True,
similarity index 96%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/ssl_lifecycle.py
rename to rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/ssl_lifecycle.py
index 903433658ba23972a653a9858abe5e8d85c7bf43..17c9f51a6e69d4f7b53223e5c6f07cf6648330c5 100644 (file)
@@ -7,7 +7,6 @@ import stomp
 import base
 import ssl
 
-
 base_path = os.path.dirname(sys.argv[0])
 
 ssl_key_file = os.path.join(os.getenv('SSL_CERTS_PATH'), 'client', 'key.pem')
@@ -17,7 +16,7 @@ ssl_ca_certs = os.path.join(os.getenv('SSL_CERTS_PATH'), 'testca', 'cacert.pem')
 class TestSslClient(unittest.TestCase):
 
     def __ssl_connect(self):
-        conn = stomp.Connection(host_and_ports = [ ('localhost', 5674) ],
+        conn = stomp.Connection(host_and_ports = [ ('localhost', int(os.environ["STOMP_PORT_TLS"])) ],
                                 use_ssl = True, ssl_key_file = ssl_key_file,
                                 ssl_cert_file = ssl_cert_file,
                                 ssl_ca_certs = ssl_ca_certs)
@@ -27,7 +26,7 @@ class TestSslClient(unittest.TestCase):
         return conn
 
     def __ssl_auth_connect(self):
-        conn = stomp.Connection(host_and_ports = [ ('localhost', 5674) ],
+        conn = stomp.Connection(host_and_ports = [ ('localhost', int(os.environ["STOMP_PORT_TLS"])) ],
                                 use_ssl = True, ssl_key_file = ssl_key_file,
                                 ssl_cert_file = ssl_cert_file,
                                 ssl_ca_certs = ssl_ca_certs)
similarity index 89%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/x_queue_name.py
rename to rabbitmq-server/deps/rabbitmq_stomp/test/python_SUITE_data/src/x_queue_name.py
index 806b102327455dfc7f68a57660d2a9d4b6ee12d9..082957865e8f99df12bc5c8e9b32efcc1c23143a 100644 (file)
@@ -3,6 +3,7 @@ import stomp
 import pika
 import base
 import time
+import os
 
 class TestUserGeneratedQueueName(base.BaseTest):
 
@@ -18,7 +19,7 @@ class TestUserGeneratedQueueName(base.BaseTest):
                 )
 
         connection = pika.BlockingConnection(
-                pika.ConnectionParameters( host='localhost'))
+                pika.ConnectionParameters( host='localhost', port=int(os.environ["AMQP_PORT"])))
         channel = connection.channel()
 
         # publish a message to the named queue
@@ -46,7 +47,7 @@ class TestUserGeneratedQueueName(base.BaseTest):
                 )
 
         connection = pika.BlockingConnection(
-                pika.ConnectionParameters( host='localhost'))
+                pika.ConnectionParameters( host='localhost', port=int(os.environ["AMQP_PORT"])))
         channel = connection.channel()
 
         # publish a message to the named queue
index 6718a798b8294669d0efe1c53f9927dc5c843cd1..a4acb3e89313793198c0c68bc73480dc69466505 100644 (file)
 
 -module(rabbit_stomp_client).
 
--export([connect/0, connect/1, disconnect/1, send/2, send/3, send/4, recv/1]).
+-export([connect/1, connect/2, connect/4, disconnect/1, send/2, send/3, send/4, recv/1]).
 
 -include("rabbit_stomp_frame.hrl").
 
 -define(TIMEOUT, 1000). % milliseconds
 
-connect()  -> connect0([]).
-connect(V) -> connect0([{"accept-version", V}]).
+connect(Port)  -> connect0([], "guest", "guest", Port).
+connect(V, Port) -> connect0([{"accept-version", V}], "guest", "guest", Port).
+connect(V, Login, Pass, Port) -> connect0([{"accept-version", V}], Login, Pass, Port).
 
-connect0(Version) ->
+connect0(Version, Login, Pass, Port) ->
     %% The default port is 61613 but it's in the middle of the ephemeral
     %% ports range on many operating systems. Therefore, there is a
     %% chance this port is already in use. Let's use a port close to the
     %% AMQP default port.
-    {ok, Sock} = gen_tcp:connect(localhost, 5673, [{active, false}, binary]),
+    {ok, Sock} = gen_tcp:connect(localhost, Port, [{active, false}, binary]),
     Client0 = recv_state(Sock),
-    send(Client0, "CONNECT", [{"login", "guest"},
-                              {"passcode", "guest"} | Version]),
+    send(Client0, "CONNECT", [{"login", Login},
+                              {"passcode", Pass} | Version]),
     {#stomp_frame{command = "CONNECTED"}, Client1} = recv(Client0),
     {ok, Client1}.
 
similarity index 81%
rename from rabbitmq-server/deps/rabbitmq_stomp/test/src/rabbit_stomp_test_util.erl
rename to rabbitmq-server/deps/rabbitmq_stomp/test/util_SUITE.erl
index ce8d2269247938ae4e15e56656de4c0085322f90..c234d5895e399f03ec40b22f7aee388b713fd42e 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_stomp_test_util).
+-module(util_SUITE).
 
+-include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 -include_lib("amqp_client/include/rabbit_routing_prefixes.hrl").
 -include("rabbit_stomp_frame.hrl").
+-compile(export_all).
+
+all() -> [
+    longstr_field,
+    message_properties,
+    message_headers,
+    minimal_message_headers_with_no_custom,
+    headers_post_process,
+    headers_post_process_noop_replyto,
+    headers_post_process_noop2,
+    negotiate_version_both_empty,
+    negotiate_version_no_common,
+    negotiate_version_simple_common,
+    negotiate_version_two_choice_common,
+    negotiate_version_two_choice_common_out_of_order,
+    negotiate_version_two_choice_big_common,
+    negotiate_version_choice_mismatched_length,
+    negotiate_version_choice_duplicates,
+    trim_headers,
+    ack_mode_auto,
+    ack_mode_auto_default,
+    ack_mode_client,
+    ack_mode_client_individual,
+    consumer_tag_id,
+    consumer_tag_destination,
+    consumer_tag_invalid,
+    parse_valid_message_id,
+    parse_invalid_message_id
+    ].
+
 
 %%--------------------------------------------------------------------
 %% Header Processing Tests
 %%--------------------------------------------------------------------
 
-longstr_field_test() ->
+longstr_field(_) ->
     {<<"ABC">>, longstr, <<"DEF">>} =
         rabbit_stomp_util:longstr_field("ABC", "DEF").
 
-message_properties_test() ->
+message_properties(_) ->
     Headers = [
                 {"content-type", "text/plain"},
                 {"content-encoding", "UTF-8"},
@@ -65,7 +96,7 @@ message_properties_test() ->
               } =
         rabbit_stomp_util:message_properties(#stomp_frame{headers = Headers}).
 
-message_headers_test() ->
+message_headers(_) ->
     Properties = #'P_basic'{
       headers          = [{<<"str">>, longstr, <<"foo">>},
                           {<<"int">>, signedint, 123}],
@@ -102,13 +133,7 @@ message_headers_test() ->
 
     [] = lists:subtract(Headers, Expected).
 
-minimal_message_headers_with_no_custom_test() ->
-    Delivery = #'basic.deliver'{
-      consumer_tag = <<"Q_123">>,
-      delivery_tag = 123,
-      exchange = <<"">>,
-      routing_key = <<"foo">>},
-
+minimal_message_headers_with_no_custom(_) ->
     Properties = #'P_basic'{},
 
     Headers = rabbit_stomp_util:message_headers(Properties),
@@ -120,7 +145,7 @@ minimal_message_headers_with_no_custom_test() ->
 
     [] = lists:subtract(Headers, Expected).
 
-headers_post_process_test() ->
+headers_post_process(_) ->
     Headers  = [{"header1", "1"},
                 {"header2", "12"},
                 {"reply-to", "something"}],
@@ -130,13 +155,13 @@ headers_post_process_test() ->
     [] = lists:subtract(
            rabbit_stomp_util:headers_post_process(Headers), Expected).
 
-headers_post_process_noop_replyto_test() ->
+headers_post_process_noop_replyto(_) ->
     [begin
          Headers = [{"reply-to", Prefix ++ "/something"}],
          Headers = rabbit_stomp_util:headers_post_process(Headers)
      end || Prefix <- rabbit_routing_util:dest_prefixes()].
 
-headers_post_process_noop2_test() ->
+headers_post_process_noop2(_) ->
     Headers  = [{"header1", "1"},
                 {"header2", "12"}],
     Expected = [{"header1", "1"},
@@ -144,38 +169,38 @@ headers_post_process_noop2_test() ->
     [] = lists:subtract(
            rabbit_stomp_util:headers_post_process(Headers), Expected).
 
-negotiate_version_both_empty_test() ->
+negotiate_version_both_empty(_) ->
     {error, no_common_version} = rabbit_stomp_util:negotiate_version([],[]).
 
-negotiate_version_no_common_test() ->
+negotiate_version_no_common(_) ->
     {error, no_common_version} =
         rabbit_stomp_util:negotiate_version(["1.2"],["1.3"]).
 
-negotiate_version_simple_common_test() ->
+negotiate_version_simple_common(_) ->
     {ok, "1.2"} =
         rabbit_stomp_util:negotiate_version(["1.2"],["1.2"]).
 
-negotiate_version_two_choice_common_test() ->
+negotiate_version_two_choice_common(_) ->
     {ok, "1.3"} =
         rabbit_stomp_util:negotiate_version(["1.2", "1.3"],["1.2", "1.3"]).
 
-negotiate_version_two_choice_common_out_of_order_test() ->
+negotiate_version_two_choice_common_out_of_order(_) ->
     {ok, "1.3"} =
         rabbit_stomp_util:negotiate_version(["1.3", "1.2"],["1.2", "1.3"]).
 
-negotiate_version_two_choice_big_common_test() ->
+negotiate_version_two_choice_big_common(_) ->
     {ok, "1.20.23"} =
         rabbit_stomp_util:negotiate_version(["1.20.23", "1.30.456"],
                                             ["1.20.23", "1.30.457"]).
-negotiate_version_choice_mismatched_length_test() ->
+negotiate_version_choice_mismatched_length(_) ->
     {ok, "1.2.3"} =
         rabbit_stomp_util:negotiate_version(["1.2", "1.2.3"],
                                             ["1.2.3", "1.2"]).
-negotiate_version_choice_duplicates_test() ->
+negotiate_version_choice_duplicates(_) ->
     {ok, "1.2"} =
         rabbit_stomp_util:negotiate_version(["1.2", "1.2"],
                                             ["1.2", "1.2"]).
-trim_headers_test() ->
+trim_headers(_) ->
     #stomp_frame{headers = [{"one", "foo"}, {"two", "baz "}]} =
         rabbit_stomp_util:trim_headers(
           #stomp_frame{headers = [{"one", "  foo"}, {"two", " baz "}]}).
@@ -184,31 +209,31 @@ trim_headers_test() ->
 %% Frame Parsing Tests
 %%--------------------------------------------------------------------
 
-ack_mode_auto_test() ->
+ack_mode_auto(_) ->
     Frame = #stomp_frame{headers = [{"ack", "auto"}]},
     {auto, _} = rabbit_stomp_util:ack_mode(Frame).
 
-ack_mode_auto_default_test() ->
+ack_mode_auto_default(_) ->
     Frame = #stomp_frame{headers = []},
     {auto, _} = rabbit_stomp_util:ack_mode(Frame).
 
-ack_mode_client_test() ->
+ack_mode_client(_) ->
     Frame = #stomp_frame{headers = [{"ack", "client"}]},
     {client, true} = rabbit_stomp_util:ack_mode(Frame).
 
-ack_mode_client_individual_test() ->
+ack_mode_client_individual(_) ->
     Frame = #stomp_frame{headers = [{"ack", "client-individual"}]},
     {client, false} = rabbit_stomp_util:ack_mode(Frame).
 
-consumer_tag_id_test() ->
+consumer_tag_id(_) ->
     Frame = #stomp_frame{headers = [{"id", "foo"}]},
     {ok, <<"T_foo">>, _} = rabbit_stomp_util:consumer_tag(Frame).
 
-consumer_tag_destination_test() ->
+consumer_tag_destination(_) ->
     Frame = #stomp_frame{headers = [{"destination", "foo"}]},
     {ok, <<"Q_foo">>, _} = rabbit_stomp_util:consumer_tag(Frame).
 
-consumer_tag_invalid_test() ->
+consumer_tag_invalid(_) ->
     Frame = #stomp_frame{headers = []},
     {error, missing_destination_header} = rabbit_stomp_util:consumer_tag(Frame).
 
@@ -216,11 +241,11 @@ consumer_tag_invalid_test() ->
 %% Message ID Parsing Tests
 %%--------------------------------------------------------------------
 
-parse_valid_message_id_test() ->
+parse_valid_message_id(_) ->
     {ok, {<<"bar">>, "abc", 123}} =
         rabbit_stomp_util:parse_message_id("bar@@abc@@123").
 
-parse_invalid_message_id_test() ->
+parse_invalid_message_id(_) ->
     {error, invalid_message_id} =
         rabbit_stomp_util:parse_message_id("blah").
 
diff --git a/rabbitmq-server/deps/rabbitmq_top/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_top/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/rabbitmq-server/deps/rabbitmq_top/CONTRIBUTING.md b/rabbitmq-server/deps/rabbitmq_top/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..45bbcbe
--- /dev/null
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/rabbitmq-server/deps/rabbitmq_top/Makefile b/rabbitmq-server/deps/rabbitmq_top/Makefile
new file mode 100644 (file)
index 0000000..36904cd
--- /dev/null
@@ -0,0 +1,14 @@
+PROJECT = rabbitmq_top
+
+DEPS = amqp_client rabbitmq_management webmachine
+
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+include erlang.mk
diff --git a/rabbitmq-server/deps/rabbitmq_top/README.md b/rabbitmq-server/deps/rabbitmq_top/README.md
new file mode 100644 (file)
index 0000000..1d5b546
--- /dev/null
@@ -0,0 +1,42 @@
+# Features
+
+Adds top-like information on the Erlang VM to the management plugin.
+
+Screenshots: http://imgur.com/a/BjVOP
+
+Should work with older versions of RabbitMQ, but when compiled against
+RabbitMQ 3.3.0 or later you can see descriptions of the processes
+matching RabbitMQ server concepts (queue, channel etc).
+
+Sort by process ID, memory use or reductions/sec (an approximate
+measure of CPU use).
+
+Click on the process description (e.g. "my queue") to see that
+object's management view.
+
+Click on the process ID (e.g. "&lt;0.3423.0&gt;") to see some more
+Erlang-ish process details, including the current stacktrace.
+
+# Downloading
+
+You can download a pre-built binary of this plugin from
+http://www.rabbitmq.com/community-plugins.html.
+
+# Building
+
+You can build and install it like any other plugin (see
+[the plugin development guide](http://www.rabbitmq.com/plugin-development.html)).
+
+# API
+
+You can drive the HTTP API yourself. It installs into the management plugin's API; you should understand that first. Once you do, the additional paths look like:
+
+    /api/top/<node-name>
+
+List of processes. Takes similar query string parameters to other
+lists, `sort`, `sort_reverse` and `columns`. Sorting is quite
+important as it currently hard-codes returning the top 20 processes.
+
+    /api/process/<pid>
+
+Individual process details.
diff --git a/rabbitmq-server/deps/rabbitmq_top/erlang.mk b/rabbitmq-server/deps/rabbitmq_top/erlang.mk
new file mode 100644 (file)
index 0000000..9f0c0c3
--- /dev/null
@@ -0,0 +1,6589 @@
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app deps search rel docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+
+ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+gen_verbose_0 = @echo " GEN   " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A0 -noinput -boot start_clean
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+       $(verbose) :
+
+check:: clean app tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+       $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+distclean-tmp:
+       $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+       $(verbose) printf "%s\n" \
+               "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+               "Copyright (c) 2013-2015 Loïc Hoguin <essen@ninenines.eu>" \
+               "" \
+               "Usage: [V=1] $(MAKE) [target]..." \
+               "" \
+               "Core targets:" \
+               "  all           Run deps, app and rel targets in that order" \
+               "  app           Compile the project" \
+               "  deps          Fetch dependencies (if needed) and compile them" \
+               "  fetch-deps    Fetch dependencies (if needed) without compiling them" \
+               "  list-deps     Fetch dependencies (if needed) and list them" \
+               "  search q=...  Search for a package in the built-in index" \
+               "  rel           Build a release for this project, if applicable" \
+               "  docs          Build the documentation for this project" \
+               "  install-docs  Install the man pages for this project" \
+               "  check         Compile and run all tests and analysis for this project" \
+               "  tests         Run the tests for this project" \
+               "  clean         Delete temporary and output files from most targets" \
+               "  distclean     Delete all temporary and output files" \
+               "  help          Display this help and exit" \
+               "  erlang-mk     Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty)        $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+# Adding erlang.mk to make Erlang scripts that call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $(2) -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(subst ",\",$(1)))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(subst \,\\\\,$(shell cygpath -w $1))
+else
+core_native_path = $1
+endif
+
+ifeq ($(shell which wget 2>/dev/null | wc -l), 1)
+define core_http_get
+       wget --no-check-certificate -O $(1) $(2)|| rm $(1)
+endef
+else
+define core_http_get.erl
+       ssl:start(),
+       inets:start(),
+       case httpc:request(get, {"$(2)", []}, [{autoredirect, true}], []) of
+               {ok, {{_, 200, _}, _, Body}} ->
+                       case file:write_file("$(1)", Body) of
+                               ok -> ok;
+                               {error, R1} -> halt(R1)
+                       end;
+               {error, R2} ->
+                       halt(R2)
+       end,
+       halt(0).
+endef
+
+define core_http_get
+       $(call erlang,$(call core_http_get.erl,$(call core_native_path,$1),$2))
+endef
+endif
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) -type f -name $(subst *,\*,$2)))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk:
+       git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ifdef ERLANG_MK_COMMIT
+       cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+endif
+       if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+       $(MAKE) -C $(ERLANG_MK_BUILD_DIR)
+       cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+       rm -rf $(ERLANG_MK_BUILD_DIR)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = 1.0.4
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/riverrun/branglecrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/riverrun/branglecrypt
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = master
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = v0.1.2
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY.  It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += classifier
+pkg_classifier_name = classifier
+pkg_classifier_description = An Erlang Bayesian Filter and Text Classifier
+pkg_classifier_homepage = https://github.com/inaka/classifier
+pkg_classifier_fetch = git
+pkg_classifier_repo = https://github.com/inaka/classifier
+pkg_classifier_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.1
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.1
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dhtcrawler
+pkg_dhtcrawler_name = dhtcrawler
+pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
+pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
+pkg_dhtcrawler_fetch = git
+pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
+pkg_dhtcrawler_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dtl
+pkg_dtl_name = dtl
+pkg_dtl_description = Django Template Language: A full-featured port of the Django template engine to Erlang.
+pkg_dtl_homepage = https://github.com/oinksoft/dtl
+pkg_dtl_fetch = git
+pkg_dtl_repo = https://github.com/oinksoft/dtl
+pkg_dtl_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simply writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += eganglia
+pkg_eganglia_name = eganglia
+pkg_eganglia_description = Erlang library to interact with Ganglia
+pkg_eganglia_homepage = https://github.com/inaka/eganglia
+pkg_eganglia_fetch = git
+pkg_eganglia_repo = https://github.com/inaka/eganglia
+pkg_eganglia_commit = v0.9.1
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = 2.0.4
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/knutin/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/knutin/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = 0.2.4
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is Pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = 0.1.1
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = exec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = 1.2
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = v1.4.6
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards 'and' for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gossiperl
+pkg_gossiperl_name = gossiperl
+pkg_gossiperl_description = Gossip middleware in Erlang
+pkg_gossiperl_homepage = http://gossiperl.com/
+pkg_gossiperl_fetch = git
+pkg_gossiperl_repo = https://github.com/gossiperl/gossiperl
+pkg_gossiperl_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = v4.1.1
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = 0.6.0
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/klarna/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/klarna/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = 0.3.3
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = 0.3
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficient decode and encode JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/basho/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/basho/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/basho/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/basho/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = 0.1.0
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = Erlang MySQL Driver (from code.google.com)
+pkg_mysql_homepage = https://github.com/dizzyd/erlang-mysql-driver
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/dizzyd/erlang-mysql-driver
+pkg_mysql_commit = master
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += oauth2c
+pkg_oauth2c_name = oauth2c
+pkg_oauth2c_description = Erlang OAuth2 Client
+pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
+pkg_oauth2c_fetch = git
+pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
+pkg_oauth2c_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = 1.0.0
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = 0.3
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = 0.4.0
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.1.0
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = 2.2.1
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = 0.1.0
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool    : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global process registry for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an id generator for message service.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://github.com/krestenkrab/triq
+pkg_triq_fetch = git
+pkg_triq_repo = https://github.com/krestenkrab/triq
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = 0.3.0
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = v1.4.0
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = 1.0.3
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = 0.2.0
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = zab propotocol implement by erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+# Fix: the commit field was empty (whitespace only), so the generated
+# dep_zeta fetch rule would run `git checkout ""` and fail.  Default to
+# the repository's master branch, consistent with every other entry.
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang. Read more on zippers
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+# pkg_print(1): print a human-readable summary of package $(1) from the
+# pkg_$(1)_* index variables above (app name, description, homepage,
+# fetch method, repository, commit).  The "Pkg name:" line is emitted
+# only when the index key differs from the package's app name.
+# Comments are kept outside the define: a `#` line inside the body would
+# become part of the macro's expansion.
+define pkg_print
+       $(verbose) printf "%s\n" \
+               $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name:    $(1)") \
+               "App name:    $(pkg_$(1)_name)" \
+               "Description: $(pkg_$(1)_description)" \
+               "Home page:   $(pkg_$(1)_homepage)" \
+               "Fetch with:  $(pkg_$(1)_fetch)" \
+               "Repository:  $(pkg_$(1)_repo)" \
+               "Commit:      $(pkg_$(1)_commit)" \
+               ""
+
+endef
+
+# `make search q=<keyword>` prints every package whose name or
+# description contains <keyword> (case-insensitive, via core_lc);
+# `make search` with no q prints the whole package index.
+search:
+ifdef q
+       $(foreach p,$(PACKAGES), \
+               $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+                       $(call pkg_print,$(p))))
+else
+       $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+# Dependencies listed here are excluded from ALL_DEPS_DIRS and thus never
+# fetched or built.
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+# Exported so rebar-based dependencies resolve their deps into our DEPS_DIR.
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+# dep_name/dep_repo/dep_commit(1): resolve a dependency's name, repository
+# and commit, preferring an explicit dep_<name> definition over the built-in
+# pkg_<name>_* index entries.  dep_repo also rewrites git:// GitHub URLs
+# to https://.
+dep_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+dep_repo = $(patsubst git://github.com/%,https://github.com/%, \
+       $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo)))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+
+# All application directories under APPS_DIR (excluding APPS_DIR itself),
+# and the directories of every non-ignored build/runtime dependency.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# Make APPS_DIR and DEPS_DIR visible to the Erlang code path via ERL_LIBS,
+# appending only if they are not already present.
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+       ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+       ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+# V=0: short " DEP <name>" lines; V=2: echo every shell command (set -x).
+dep_verbose_0 = @echo " DEP   " $(1);
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Core targets.
+
+# deps:: fetch and build all dependencies (no-op when SKIP_DEPS is set).
+# When invoked from the top level (not IS_APP), each application in
+# APPS_DIR is built first.  deps.log records dependencies already built
+# during this run so diamond dependencies are only built once; it is
+# reset at the top of a run (IS_DEP != 1).  A dependency with no
+# Makefile/makefile/GNUmakefile is a hard error (exit 2).
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS)
+ifndef IS_APP
+       $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+               $(MAKE) -C $$dep IS_APP=1 || exit $$?; \
+       done
+endif
+ifneq ($(IS_DEP),1)
+       $(verbose) rm -f $(ERLANG_MK_TMP)/deps.log
+endif
+       $(verbose) mkdir -p $(ERLANG_MK_TMP)
+       $(verbose) for dep in $(ALL_DEPS_DIRS) ; do \
+               if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+                       :; \
+               else \
+                       echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+                       if [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+                               $(MAKE) -C $$dep IS_DEP=1 || exit $$?; \
+                       else \
+                               echo "Error: No Makefile to build dependency $$dep."; \
+                               exit 2; \
+                       fi \
+               fi \
+       done
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While Makefile file could be GNUmakefile or makefile,
+# in practice only Makefile is needed so far.
+# dep_autopatch(1): decide how to adapt dependency $(1) to build with
+# erlang.mk.  Decision tree:
+#   - Makefile includes a ../*.mk file, or mentions rebar anywhere
+#     (Makefile itself or any non-erlang.mk *.mk file)  -> dep_autopatch2
+#   - Makefile present and already erlang.mk based       -> regenerate
+#     .app.src and overwrite its erlang.mk copy
+#   - Makefile present, no build system detected         -> generate .app
+#   - no Makefile and no src/ directory                  -> noop Makefile
+#   - no Makefile but has src/                           -> dep_autopatch2
+define dep_autopatch
+       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+               if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+                       $(call dep_autopatch2,$(1)); \
+               elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
+                       $(call dep_autopatch2,$(1)); \
+               elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
+                       $(call dep_autopatch2,$(1)); \
+               else \
+                       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+                               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+                               $(call dep_autopatch_erlang_mk,$(1)); \
+                       else \
+                               $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
+                       fi \
+               fi \
+       else \
+               if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+                       $(call dep_autopatch_noop,$(1)); \
+               else \
+                       $(call dep_autopatch2,$(1)); \
+               fi \
+       fi
+endef
+
+# dep_autopatch2(1): normalize $(1)'s .app.src, then either convert its
+# rebar configuration (fetching/building rebar on demand) or, when no
+# rebar.config[.script] exists, generate a minimal erlang.mk Makefile.
+define dep_autopatch2
+       $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+       if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+               $(call dep_autopatch_fetch_rebar); \
+               $(call dep_autopatch_rebar,$(1)); \
+       else \
+               $(call dep_autopatch_gen,$(1)); \
+       fi
+endef
+
+# dep_autopatch_noop(1): give $(1) a stub Makefile with a do-nothing
+# "noop" default target so `make -C` on it succeeds.
+define dep_autopatch_noop
+       printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Overwrite erlang.mk with the current file by default.
+# dep_autopatch_erlang_mk(1): replace the dependency's bundled erlang.mk
+# with an include of this (top-level) erlang.mk, so all deps build with
+# one consistent version.  Setting NO_AUTOPATCH_ERLANG_MK makes this a
+# no-op (`:`) and keeps each dependency's own copy.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+       echo "include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(DEPS_DIR)/app)/erlang.mk" \
+               > $(DEPS_DIR)/$1/erlang.mk
+endef
+else
+define dep_autopatch_erlang_mk
+       :
+endef
+endif
+
+# dep_autopatch_gen(1): overwrite $(1)'s Makefile with a minimal
+# erlang.mk build (debug_info only), for deps with no usable build system.
+define dep_autopatch_gen
+       printf "%s\n" \
+               "ERLC_OPTS = +debug_info" \
+               "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# dep_autopatch_fetch_rebar: clone and build a pinned rebar (fixed commit
+# for reproducibility) into ERLANG_MK_TMP, once per tree; used by the
+# rebar.config conversion below.
+define dep_autopatch_fetch_rebar
+       mkdir -p $(ERLANG_MK_TMP); \
+       if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+               git clone -q -n -- https://github.com/rebar/rebar $(ERLANG_MK_TMP)/rebar; \
+               cd $(ERLANG_MK_TMP)/rebar; \
+               git checkout -q 791db716b5a3a7671e0b351f95ddf24b848ee173; \
+               $(MAKE); \
+               cd -; \
+       fi
+endef
+
+# dep_autopatch_rebar(1): set aside $(1)'s own Makefile (kept as
+# Makefile.orig.mk), generate an erlang.mk Makefile from its rebar
+# configuration via dep_autopatch_rebar.erl, and drop any prebuilt
+# ebin/$(1).app so the app file is regenerated from .app.src.
+define dep_autopatch_rebar
+       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+               mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+       fi; \
+       $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+       rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+define dep_autopatch_rebar.erl
+       application:load(rebar),
+       application:set_env(rebar, log_level, debug),
+       Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+               {ok, Conf0} -> Conf0;
+               _ -> []
+       end,
+       {Conf, OsEnv} = fun() ->
+               case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+                       false -> {Conf1, []};
+                       true ->
+                               Bindings0 = erl_eval:new_bindings(),
+                               Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+                               Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+                               Before = os:getenv(),
+                               {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+                               {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+               end
+       end(),
+       Write = fun (Text) ->
+               file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+       end,
+       Escape = fun (Text) ->
+               re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+       end,
+       Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+               "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+       Write("C_SRC_DIR = /path/do/not/exist\n"),
+       Write("C_SRC_TYPE = rebar\n"),
+       Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+       Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+       fun() ->
+               Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+               case lists:keyfind(erl_opts, 1, Conf) of
+                       false -> ok;
+                       {_, ErlOpts} ->
+                               lists:foreach(fun
+                                       ({d, D}) ->
+                                               Write("ERLC_OPTS += -D" ++ atom_to_list(D) ++ "=1\n");
+                                       ({i, I}) ->
+                                               Write(["ERLC_OPTS += -I ", I, "\n"]);
+                                       ({platform_define, Regex, D}) ->
+                                               case rebar_utils:is_arch(Regex) of
+                                                       true -> Write("ERLC_OPTS += -D" ++ atom_to_list(D) ++ "=1\n");
+                                                       false -> ok
+                                               end;
+                                       ({parse_transform, PT}) ->
+                                               Write("ERLC_OPTS += +'{parse_transform, " ++ atom_to_list(PT) ++ "}'\n");
+                                       (_) -> ok
+                               end, ErlOpts)
+               end,
+               Write("\n")
+       end(),
+       fun() ->
+               File = case lists:keyfind(deps, 1, Conf) of
+                       false -> [];
+                       {_, Deps} ->
+                               [begin case case Dep of
+                                                       {N, S} when is_atom(N), is_list(S) -> {N, {hex, S}};
+                                                       {N, S} when is_tuple(S) -> {N, S};
+                                                       {N, _, S} -> {N, S};
+                                                       {N, _, S, _} -> {N, S};
+                                                       _ -> false
+                                               end of
+                                       false -> ok;
+                                       {Name, Source} ->
+                                               {Method, Repo, Commit} = case Source of
+                                                       {hex, V} -> {hex, V, undefined};
+                                                       {git, R} -> {git, R, master};
+                                                       {M, R, {branch, C}} -> {M, R, C};
+                                                       {M, R, {ref, C}} -> {M, R, C};
+                                                       {M, R, {tag, C}} -> {M, R, C};
+                                                       {M, R, C} -> {M, R, C}
+                                               end,
+                                               Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+                               end end || Dep <- Deps]
+               end
+       end(),
+       fun() ->
+               case lists:keyfind(erl_first_files, 1, Conf) of
+                       false -> ok;
+                       {_, Files} ->
+                               Names = [[" ", case lists:reverse(F) of
+                                       "lre." ++ Elif -> lists:reverse(Elif);
+                                       Elif -> lists:reverse(Elif)
+                               end] || "src/" ++ F <- Files],
+                               Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+               end
+       end(),
+       FindFirst = fun(F, Fd) ->
+               case io:parse_erl_form(Fd, undefined) of
+                       {ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
+                               [PT, F(F, Fd)];
+                       {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
+                               case proplists:get_value(parse_transform, CompileOpts) of
+                                       undefined -> [F(F, Fd)];
+                                       PT -> [PT, F(F, Fd)]
+                               end;
+                       {ok, {attribute, _, include, Hrl}, _} ->
+                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
+                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+                                       _ ->
+                                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
+                                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+                                                       _ -> [F(F, Fd)]
+                                               end
+                               end;
+                       {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
+                               {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
+                               [F(F, HrlFd), F(F, Fd)];
+                       {ok, {attribute, _, include_lib, Hrl}, _} ->
+                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
+                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+                                       _ -> [F(F, Fd)]
+                               end;
+                       {ok, {attribute, _, import, {Imp, _}}, _} ->
+                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
+                                       {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
+                                       _ -> [F(F, Fd)]
+                               end;
+                       {eof, _} ->
+                               file:close(Fd),
+                               [];
+                       _ ->
+                               F(F, Fd)
+               end
+       end,
+       fun() ->
+               ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
+               First0 = lists:usort(lists:flatten([begin
+                       {ok, Fd} = file:open(F, [read]),
+                       FindFirst(FindFirst, Fd)
+               end || F <- ErlFiles])),
+               First = lists:flatten([begin
+                       {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
+                       FindFirst(FindFirst, Fd)
+               end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
+               Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
+                       lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
+       end(),
+       Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+       Write("\npreprocess::\n"),
+       Write("\npre-deps::\n"),
+       Write("\npre-app::\n"),
+       PatchHook = fun(Cmd) ->
+               case Cmd of
+                       "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+                       "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+                       "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+                       "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+                       _ -> Escape(Cmd)
+               end
+       end,
+       fun() ->
+               case lists:keyfind(pre_hooks, 1, Conf) of
+                       false -> ok;
+                       {_, Hooks} ->
+                               [case H of
+                                       {'get-deps', Cmd} ->
+                                               Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+                                       {compile, Cmd} ->
+                                               Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+                                       {Regex, compile, Cmd} ->
+                                               case rebar_utils:is_arch(Regex) of
+                                                       true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+                                                       false -> ok
+                                               end;
+                                       _ -> ok
+                               end || H <- Hooks]
+               end
+       end(),
+       ShellToMk = fun(V) ->
+               re:replace(re:replace(V, "(\\\\$$)(\\\\w*)", "\\\\1(\\\\2)", [global]),
+                       "-Werror\\\\b", "", [{return, list}, global])
+       end,
+       PortSpecs = fun() ->
+               case lists:keyfind(port_specs, 1, Conf) of
+                       false ->
+                               case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+                                       false -> [];
+                                       true ->
+                                               [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+                                                       proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+                               end;
+                       {_, Specs} ->
+                               lists:flatten([case S of
+                                       {Output, Input} -> {ShellToMk(Output), Input, []};
+                                       {Regex, Output, Input} ->
+                                               case rebar_utils:is_arch(Regex) of
+                                                       true -> {ShellToMk(Output), Input, []};
+                                                       false -> []
+                                               end;
+                                       {Regex, Output, Input, [{env, Env}]} ->
+                                               case rebar_utils:is_arch(Regex) of
+                                                       true -> {ShellToMk(Output), Input, Env};
+                                                       false -> []
+                                               end
+                               end || S <- Specs])
+               end
+       end(),
+       PortSpecWrite = fun (Text) ->
+               file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+       end,
+       case PortSpecs of
+               [] -> ok;
+               _ ->
+                       Write("\npre-app::\n\t$$\(MAKE) -f c_src/Makefile.erlang.mk\n"),
+                       PortSpecWrite(io_lib:format("ERL_CFLAGS = -finline-functions -Wall -fPIC -I ~s/erts-~s/include -I ~s\n",
+                               [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+                       PortSpecWrite(io_lib:format("ERL_LDFLAGS = -L ~s -lerl_interface -lei\n",
+                               [code:lib_dir(erl_interface, lib)])),
+                       [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+                       FilterEnv = fun(Env) ->
+                               lists:flatten([case E of
+                                       {_, _} -> E;
+                                       {Regex, K, V} ->
+                                               case rebar_utils:is_arch(Regex) of
+                                                       true -> {K, V};
+                                                       false -> []
+                                               end
+                               end || E <- Env])
+                       end,
+                       MergeEnv = fun(Env) ->
+                               lists:foldl(fun ({K, V}, Acc) ->
+                                       case lists:keyfind(K, 1, Acc) of
+                                               false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+                                               {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+                                       end
+                               end, [], Env)
+                       end,
+                       PortEnv = case lists:keyfind(port_env, 1, Conf) of
+                               false -> [];
+                               {_, PortEnv0} -> FilterEnv(PortEnv0)
+                       end,
+                       PortSpec = fun ({Output, Input0, Env}) ->
+                               filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+                               Input = [[" ", I] || I <- Input0],
+                               PortSpecWrite([
+                                       [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+                                       case $(PLATFORM) of
+                                               darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+                                               _ -> ""
+                                       end,
+                                       "\n\nall:: ", Output, "\n\n",
+                                       "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+                                       "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+                                       "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+                                       "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+                                       [[Output, ": ", K, " = ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+                                       Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+                                               "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+                                       "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+                                       case filename:extension(Output) of
+                                               [] -> "\n";
+                                               _ -> " -shared\n"
+                                       end])
+                       end,
+                       [PortSpec(S) || S <- PortSpecs]
+       end,
+       Write("\ninclude $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(DEPS_DIR)/app)/erlang.mk"),
+       RunPlugin = fun(Plugin, Step) ->
+               case erlang:function_exported(Plugin, Step, 2) of
+                       false -> ok;
+                       true ->
+                               c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+                               Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+                                       dict:store(base_dir, "", dict:new())}, undefined),
+                               io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+               end
+       end,
+       fun() ->
+               case lists:keyfind(plugins, 1, Conf) of
+                       false -> ok;
+                       {_, Plugins} ->
+                               [begin
+                                       case lists:keyfind(deps, 1, Conf) of
+                                               false -> ok;
+                                               {_, Deps} ->
+                                                       case lists:keyfind(P, 1, Deps) of
+                                                               false -> ok;
+                                                               _ ->
+                                                                       Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+                                                                       io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+                                                                       io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+                                                                       code:add_patha(Path ++ "/ebin")
+                                                       end
+                                       end
+                               end || P <- Plugins],
+                               [case code:load_file(P) of
+                                       {module, P} -> ok;
+                                       _ ->
+                                               case lists:keyfind(plugin_dir, 1, Conf) of
+                                                       false -> ok;
+                                                       {_, PluginsDir} ->
+                                                               ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+                                                               {ok, P, Bin} = compile:file(ErlFile, [binary]),
+                                                               {module, P} = code:load_binary(P, ErlFile, Bin)
+                                               end
+                               end || P <- Plugins],
+                               [RunPlugin(P, preprocess) || P <- Plugins],
+                               [RunPlugin(P, pre_compile) || P <- Plugins],
+                               [RunPlugin(P, compile) || P <- Plugins]
+               end
+       end(),
+       halt()
+endef
+
+# dep_autopatch_app.erl: Erlang program (expanded by Make and evaluated with
+# newlines stripped, so no inline '%' comments are possible in the body).
+# Rewrites the {modules, ...} entry of a dependency's ebin/$1.app resource
+# file so that it lists every .erl module found under $(DEPS_DIR)/$1/src.
+# No-op when the .app file does not exist.
+define dep_autopatch_app.erl
+	UpdateModules = fun(App) ->
+		case filelib:is_regular(App) of
+			false -> ok;
+			true ->
+				{ok, [{application, '$(1)', L0}]} = file:consult(App),
+				Mods = filelib:fold_files("$(call core_native_path,$(DEPS_DIR)/$1/src)", "\\\\.erl$$", true,
+					fun (F, Acc) -> [list_to_atom(filename:rootname(filename:basename(F)))|Acc] end, []),
+				L = lists:keystore(modules, 1, L0, {modules, Mods}),
+				ok = file:write_file(App, io_lib:format("~p.~n", [{application, '$(1)', L}]))
+		end
+	end,
+	UpdateModules("$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"),
+	halt()
+endef
+
+# dep_autopatch_appsrc.erl: Erlang program (newlines are stripped before
+# evaluation, so the body cannot carry '%' comments).  Normalizes a
+# dependency's application resource: prefers src/$1.app.src, falling back to
+# ebin/$1.app as input.  It empties the modules list (repopulated at build
+# time), turns a rebar-style `{vsn, git}` atom into the "git" string, and
+# ensures a {registered, []} entry exists.  When ebin/$1.app was used as the
+# input it is deleted after src/$1.app.src has been written.
+define dep_autopatch_appsrc.erl
+	AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+	AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+	case filelib:is_regular(AppSrcIn) of
+		false -> ok;
+		true ->
+			{ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+			L1 = lists:keystore(modules, 1, L0, {modules, []}),
+			L2 = case lists:keyfind(vsn, 1, L1) of {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, "git"}); _ -> L1 end,
+			L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+			ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+			case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+	end,
+	halt()
+endef
+
+# --- Dependency fetch methods -------------------------------------------
+# Each dep_fetch_* define expands to a shell fragment that dep_target
+# splices into a single recipe line (note the `; \` continuations), so the
+# bodies themselves cannot carry comments.  dep_repo / dep_name /
+# dep_commit are helper macros resolved from the user's dep_* variables
+# (defined elsewhere in erlang.mk).
+
+# Git: clone without checkout, then check out the pinned commit/tag/branch.
+define dep_fetch_git
+	git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+	cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+# Git submodule: the dependency is a submodule of the current repository.
+define dep_fetch_git-submodule
+	git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+# Mercurial: clone without a working copy (-U), then update to the pinned rev.
+define dep_fetch_hg
+	hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+	cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+# Subversion checkout; no revision pinning here, only the repo URL is used.
+define dep_fetch_svn
+	svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Local copy: the "repo" is a directory copied recursively into DEPS_DIR.
+define dep_fetch_cp
+	cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# dep_fetch_hex.erl: Erlang program (evaluated with newlines stripped, so no
+# inline comments) that downloads package $(1) at version $(2) from the Hex
+# CDN over HTTPS and unpacks the inner contents.tar.gz into the dep's
+# directory.  The pattern match on the 200 status makes any HTTP failure
+# crash the script, aborting the fetch.
+# NOTE(review): the CHECKSUM shipped inside the Hex tarball is not verified
+# by this code — integrity rests on the TLS connection only.
+define dep_fetch_hex.erl
+	ssl:start(),
+	inets:start(),
+	{ok, {{_, 200, _}, _, Body}} = httpc:request(get,
+		{"https://s3.amazonaws.com/s3.hex.pm/tarballs/$(1)-$(2).tar", []},
+		[], [{body_format, binary}]),
+	{ok, Files} = erl_tar:extract({binary, Body}, [memory]),
+	{_, Source} = lists:keyfind("contents.tar.gz", 1, Files),
+	ok = erl_tar:extract({binary, Source}, [{cwd, "$(call core_native_path,$(DEPS_DIR)/$1)"}, compressed]),
+	halt()
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+# The version is the second word of the user's dep_$(1) specification.
+define dep_fetch_hex
+	$(call erlang,$(call dep_fetch_hex.erl,$(1),$(strip $(word 2,$(dep_$(1))))));
+endef
+
+# Fallback fetch method: reached when dep_fetch (below) resolves to "fail".
+define dep_fetch_fail
+	echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+	exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+# Treats dep_$(1) as "<repo> [<ref>]" and clones with git, defaulting to
+# the master branch when no ref is given.
+define dep_fetch_legacy
+	$(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+	git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+	cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+# dep_fetch: resolves the fetch-method NAME for dependency $(1).  If the
+# user defined dep_$(1), its first word is the method when a matching
+# dep_fetch_<method> exists; otherwise fall back to "legacy" when building
+# as a dependency (IS_DEP) or "fail".  Without dep_$(1), look the package
+# up in the built-in index ($(PACKAGES) / pkg_$(1)_fetch), else "fail".
+define dep_fetch
+	$(if $(dep_$(1)), \
+		$(if $(dep_fetch_$(word 1,$(dep_$(1)))), \
+			$(word 1,$(dep_$(1))), \
+			$(if $(IS_DEP),legacy,fail)), \
+		$(if $(filter $(1),$(PACKAGES)), \
+			$(pkg_$(1)_fetch), \
+			fail))
+endef
+
+# dep_target: rule template, instantiated once per dependency via $(eval)
+# below.  The generated target $(DEPS_DIR)/<dep> refuses to proceed when an
+# application of the same name exists in $(APPS_DIR) (exit 17), fetches the
+# dep with the method resolved by dep_fetch, runs autoreconf and ./configure
+# when autotools inputs are present (configure failures are tolerated via
+# the leading `-`), then autopatches the dep's build files.  The
+# amqp_client / rabbit branches below are RabbitMQ-specific shims that pull
+# in rabbitmq-codegen (and rabbitmq-server) instead of the generic
+# dep_autopatch; NO_AUTOPATCH skips patching entirely.
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1):
+	$(eval DEP_NAME := $(call dep_name,$1))
+	$(eval DEP_STR := $(if $(filter-out $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+	$(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+		echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)."; \
+		exit 17; \
+	fi
+	$(verbose) mkdir -p $(DEPS_DIR)
+	$(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
+	$(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
+		echo " AUTO  " $(DEP_STR); \
+		cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+	fi
+	- $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+		echo " CONF  " $(DEP_STR); \
+		cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+	fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+	$(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+			echo " PATCH  Downloading rabbitmq-codegen"; \
+			git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+			echo " PATCH  Downloading rabbitmq-server"; \
+			git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+		fi; \
+		ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+	elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+			echo " PATCH  Downloading rabbitmq-codegen"; \
+			git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi \
+	else \
+		$$(call dep_autopatch,$(DEP_NAME)) \
+	fi
+endif
+endef
+
+# Instantiate the dep_target rule above once for every build-time and
+# runtime dependency.
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+# Recurse clean/distclean into each application in $(ALL_APPS_DIRS), but
+# only from the top-level invocation (sub-makes set IS_APP=1 so they do not
+# recurse again).  `|| exit $$?` propagates the first failing sub-make.
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+	$(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+		$(MAKE) -C $$dep clean IS_APP=1 || exit $$?; \
+	done
+
+distclean:: distclean-apps
+
+distclean-apps:
+	$(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+		$(MAKE) -C $$dep distclean IS_APP=1 || exit $$?; \
+	done
+endif
+
+# distclean removes the whole dependency tree unless SKIP_DEPS is set.
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+	$(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/list-deps.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/list-doc-deps.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/list-rel-deps.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/list-test-deps.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/list-shell-deps.log
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+# core_dep_plugin: includes a plugin makefile $(1) shipped inside dependency
+# $(2).  The -include tolerates the file being absent before the dep is
+# fetched; the empty-recipe rule (`;`) makes the plugin file depend on the
+# dep's directory so fetching the dep makes the include succeed next time.
+define core_dep_plugin
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endef
+
+# A plugin spec containing '/' is an explicit "<dep>/<file.mk>" path; a bare
+# dep name defaults to that dep's plugins.mk.
+$(foreach p,$(DEP_PLUGINS),\
+	$(eval $(if $(findstring /,$p),\
+		$(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+		$(call core_dep_plugin,$p/plugins.mk,$p))))
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# ErlyDTL (Django template) support.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_SUFFIX ?= _dtl
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL   " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+# erlydtl_compile.erl: Erlang program (evaluated with newlines stripped, so
+# no inline comments) compiling each .dtl template in $(1) to a beam module
+# in ebin/.  The module name is the template basename — or, when
+# DTL_FULL_PATH is set, the path under DTL_PATH with '/' mapped to '_' —
+# lowercased and suffixed with DTL_SUFFIX.
+define erlydtl_compile.erl
+	[begin
+		Module0 = case "$(strip $(DTL_FULL_PATH))" of
+			"" ->
+				filename:basename(F, ".dtl");
+			_ ->
+				"$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+				re:replace(F2, "/",  "_",  [{return, list}, global])
+		end,
+		Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+		case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+			ok -> ok;
+			{ok, _} -> ok
+		end
+	end || F <- string:tokens("$(1)", " ")],
+	halt().
+endef
+
+# DTL build rules; only active when the project has a src/ directory.
+ifneq ($(wildcard src/),)
+
+DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+# Map each template to its expected beam file so it participates in the
+# build; DTL_FULL_PATH encodes subdirectories into the module name.
+ifdef DTL_FULL_PATH
+BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
+else
+BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES))))
+endif
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+# The timestamp file is only touched; on a later run its being older than
+# MAKEFILE_LIST forces all templates to be touched (and thus recompiled).
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
+	@mkdir -p $(ERLANG_MK_TMP)
+	@if test -f $@; then \
+		touch $(DTL_FILES); \
+	fi
+	@touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+# Compile only the templates newer than the target ($?).
+ebin/$(PROJECT).app:: $(DTL_FILES)
+	$(if $(strip $?),\
+		$(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Protocol Buffers support (via the protobuffs application).
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+# compile_proto: generates Erlang sources from the .proto files in $(1),
+# compiles the generated .erl into ebin/, then removes the intermediates.
+define compile_proto
+	$(verbose) mkdir -p ebin/ include/
+	$(proto_verbose) $(call erlang,$(call compile_proto.erl,$(1)))
+	$(proto_verbose) erlc +debug_info -o ebin/ ebin/*.erl
+	$(verbose) rm ebin/*.erl
+endef
+
+# compile_proto.erl: Erlang program (evaluated with newlines stripped, so no
+# inline comments) invoking protobuffs_compile per .proto file; headers go
+# to <dir>/include, generated sources to <dir>/ebin.
+define compile_proto.erl
+	[begin
+		Dir = filename:dirname(filename:dirname(F)),
+		protobuffs_compile:generate_source(F,
+			[{output_include_dir, Dir ++ "/include"},
+				{output_src_dir, Dir ++ "/ebin"}])
+	end || F <- string:tokens("$(1)", " ")],
+	halt().
+endef
+
+# Compile any .proto files under src/ that are newer than the target.
+ifneq ($(wildcard src/),)
+ebin/$(PROJECT).app:: $(sort $(call core_find,src/,*.proto))
+	$(if $(strip $?),$(call compile_proto,$?))
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+# Default erlc flags; COMPILE_FIRST / ERLC_EXCLUDE are module (not file)
+# names, expanded to src/<name>.erl paths below.
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+	+warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+# Convention: <tag>_verbose_0 is the quiet echo, <tag>_verbose_2 traces the
+# recipe with `set -x`, and V selects which one (V=1 leaves both empty,
+# printing the raw command).
+
+app_verbose_0 = @echo " APP   " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP   " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC  " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+	$(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL  " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1  " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB   " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+# Application build; active only when the project has a src/ directory.
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+# When ebin/ contains test artifacts (ebin/test marker), force a clean
+# build first so test-compiled beams never ship in a regular build.
+ifeq ($(wildcard ebin/test),)
+app:: deps $(PROJECT).d
+	$(verbose) $(MAKE) --no-print-directory app-build
+else
+app:: clean deps $(PROJECT).d
+	$(verbose) $(MAKE) --no-print-directory app-build
+endif
+
+# app_file: template for a generated ebin/$(PROJECT).app when the project
+# has no src/$(PROJECT).app.src.  Two variants: without and with a
+# $(PROJECT)_app callback module (the latter adds registered names and the
+# {mod, ...} entry).  $(1) is the git describe string (only for deps),
+# $(2) the module list.  No comments may go inside the defines — their
+# bodies ARE the file contents.
+ifeq ($(wildcard src/$(PROJECT)_app.erl),)
+define app_file
+{application, $(PROJECT), [
+	{description, "$(PROJECT_DESCRIPTION)"},
+	{vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+	{id$(comma)$(space)"$(1)"}$(comma))
+	{modules, [$(call comma_list,$(2))]},
+	{registered, []},
+	{applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]}
+]}.
+endef
+else
+define app_file
+{application, $(PROJECT), [
+	{description, "$(PROJECT_DESCRIPTION)"},
+	{vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+	{id$(comma)$(space)"$(1)"}$(comma))
+	{modules, [$(call comma_list,$(2))]},
+	{registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+	{applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]},
+	{mod, {$(PROJECT)_app, []}}
+]}.
+endef
+endif
+
+# app-build only depends on the resource file; `:` is a no-op recipe.
+app-build: ebin/$(PROJECT).app
+	$(verbose) :
+
+# Source files.
+
+ERL_FILES = $(sort $(call core_find,src/,*.erl))
+CORE_FILES = $(sort $(call core_find,src/,*.core))
+
+# ASN.1 files.
+# Generated .erl files are added to ERL_FILES so the normal compile rule
+# picks them up after compile_asn1 moves them into src/.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+	$(verbose) mkdir -p include/
+	$(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(1)
+	$(verbose) mv asn1/*.erl src/
+	$(verbose) mv asn1/*.hrl include/
+	$(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+	$(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+# First pass compiles MIBs to priv/mibs/*.bin (COMPILE_MIB_FIRST_PATHS
+# orders dependent MIBs), second pass generates include/*.hrl from them.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+	$(verbose) mkdir -p include/ priv/mibs/
+	$(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+	$(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+# Generated src/*.erl are tracked separately so clean-app can remove them.
+
+XRL_FILES = $(sort $(call core_find,src/,*.xrl))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES = $(sort $(call core_find,src/,*.yrl))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+	$(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $?)
+
+# Erlang and Core Erlang files.
+
+# makedep.erl: Erlang program (evaluated with newlines stripped, so no
+# inline comments) generating the $(PROJECT).d dependency makefile, written
+# to $(1).  For each source file it parses the forms with epp and records:
+# behaviour/parse_transform attributes as dependencies on sibling modules
+# (Add), -file attributes under src/ or include/ as header dependencies
+# (AddHd), and emits a `F:: deps; @touch $@` rule plus COMPILE_FIRST
+# additions for module-level deps.  Files epp cannot find are skipped.
+define makedep.erl
+	ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+	Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
+	Add = fun (Dep, Acc) ->
+		case lists:keyfind(atom_to_list(Dep), 1, Modules) of
+			{_, DepFile} -> [DepFile|Acc];
+			false -> Acc
+		end
+	end,
+	AddHd = fun (Dep, Acc) ->
+		case {Dep, lists:keymember(Dep, 2, Modules)} of
+			{"src/" ++ _, false} -> [Dep|Acc];
+			{"include/" ++ _, false} -> [Dep|Acc];
+			_ -> Acc
+		end
+	end,
+	CompileFirst = fun (Deps) ->
+		First0 = [case filename:extension(D) of
+			".erl" -> filename:basename(D, ".erl");
+			_ -> []
+		end || D <- Deps],
+		case lists:usort(First0) of
+			[] -> [];
+			[[]] -> [];
+			First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
+		end
+	end,
+	Depend = [begin
+		case epp:parse_file(F, ["include/"], []) of
+			{ok, Forms} ->
+				Deps = lists:usort(lists:foldl(fun
+					({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
+					({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
+					({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
+					({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
+					(_, Acc) -> Acc
+				end, [], Forms)),
+				case Deps of
+					[] -> "";
+					_ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
+				end;
+			{error, enoent} ->
+				[]
+		end
+	end || F <- ErlFiles],
+	ok = file:write_file("$(1)", Depend),
+	halt()
+endef
+
+# Regenerate $(PROJECT).d when sources, headers or makefiles change.
+# NO_MAKEDEP skips regeneration once the file exists.
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+	$(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+# Same timestamp-file trick as the erlydtl variant above: when any makefile
+# is newer than the marker, touch all sources to force recompilation.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST)
+	@mkdir -p $(ERLANG_MK_TMP)
+	@if test -f $@; then \
+		touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+		touch -c $(PROJECT).d; \
+	fi
+	@touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+
+-include $(PROJECT).d
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+	$(verbose) mkdir -p ebin/
+
+# compile_erl: erlc invocation for the files in $(1); -Werror is dropped
+# when building as a dependency, COMPILE_FIRST_PATHS are compiled in the
+# same run, ERLC_EXCLUDE_PATHS are filtered out.
+define compile_erl
+	$(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+		-pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+# Main build rule: compile changed sources, then (re)generate the .app
+# resource — either from the app_file template (no .app.src present) or by
+# rewriting src/$(PROJECT).app.src's empty modules list and "git" vsn/id
+# placeholders with sed.  The interleaved ifeq/else/endif below are parsed
+# at makefile read time, selecting which recipe lines the rule gets.
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+	$(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+	$(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+	$(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null || true))
+	$(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+		$(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+	$(app_verbose) printf "$(subst $(newline),\n,$(subst ",\",$(call app_file,$(GITDESCRIBE),$(MODULES))))" \
+		> ebin/$(PROJECT).app
+else
+	$(verbose) if [ -z "$$(grep -E '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+		echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk README for instructions." >&2; \
+		exit 1; \
+	fi
+	$(appsrc_verbose) cat src/$(PROJECT).app.src \
+		| sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+		| sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(GITDESCRIBE)\"}/" \
+		> ebin/$(PROJECT).app
+endif
+
+clean:: clean-app
+
+# Remove generated build outputs, including sources generated from ASN.1,
+# leex and yecc inputs.
+clean-app:
+	$(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+		$(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+		$(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+		$(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+		$(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Documentation dependency handling: fetch and build the dependencies
+# listed in DOC_DEPS so documentation targets can use them.
+# NOTE(review): the phony declaration previously read "docs-deps", which
+# does not match the real target name "doc-deps" below, making the
+# declaration ineffective; fixed to match the target.
+.PHONY: doc-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+# Generate a fetch/build target for each documentation dependency.
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+# With SKIP_DEPS set, doc-deps is a no-op.
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+	$(verbose) for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Release dependency handling: same pattern as doc-deps, for REL_DEPS.
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+	$(verbose) for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Test support plugin: fetches TEST_DEPS, compiles the sources found in
+# TEST_DIR with TEST_ERLC_OPTS, and rebuilds the project in test mode.
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+# Always define the TEST macro so code can use -ifdef(TEST).
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+	$(verbose) for dep in $(ALL_TEST_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Compile every .erl found under TEST_DIR straight into TEST_DIR.
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir:
+	$(gen_verbose) erlc -v $(TEST_ERLC_OPTS) -I include/ -o $(TEST_DIR) \
+		$(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
+endif
+
+# ebin/test marks that ebin/ was last built with test options; when it is
+# absent, the project is cleaned first so everything rebuilds in test mode.
+ifeq ($(wildcard ebin/test),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps $(PROJECT).d
+	$(verbose) $(MAKE) --no-print-directory app-build test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+	$(gen_verbose) touch ebin/test
+else
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: deps test-deps $(PROJECT).d
+	$(verbose) $(MAKE) --no-print-directory app-build test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+	$(gen_verbose) rm -f $(TEST_DIR)/*.beam
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Rebar compatibility: generate a rebar.config mirroring DEPS and
+# ERLC_OPTS so the project can also be consumed by rebar-based builds.
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+# Normalize "opt1, opt2" into "opt1,opt2" so later word-splitting works.
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/')
+
+# Convert one erlc option for rebar: drop -Werror; keep only options that
+# contain a '+', with the leading character stripped.
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+	$(if $(findstring +,$1),\
+		$(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_rebar_config
+{deps, [$(call comma_list,$(foreach d,$(DEPS),\
+	{$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
+{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
+	$(call compat_convert_erlc_opts,$o)))]}.
+endef
+
+# Export the rendered config through the environment so the shell echo
+# below preserves its newlines.
+$(eval _compat_rebar_config = $$(compat_rebar_config))
+$(eval export _compat_rebar_config)
+
+rebar.config:
+	$(gen_verbose) echo "$${_compat_rebar_config}" > rebar.config
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# AsciiDoc plugin: builds a user guide (PDF + chunked HTML) from
+# doc/src/guide/book.asciidoc and man pages from doc/src/manual/*.asciidoc.
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+
+docs:: asciidoc
+
+asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+# No guide sources: make the target a no-op.
+asciidoc-guide:
+else
+asciidoc-guide:
+	a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+	a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+endif
+
+ifeq ($(wildcard doc/src/manual/*.asciidoc),)
+# No manual sources: make the target a no-op.
+asciidoc-manual:
+else
+asciidoc-manual:
+	for f in doc/src/manual/*.asciidoc ; do \
+		a2x -v -f manpage $$f ; \
+	done
+	for s in $(MAN_SECTIONS); do \
+		mkdir -p doc/man$$s/ ; \
+		mv doc/src/manual/*.$$s doc/man$$s/ ; \
+		gzip doc/man$$s/*.$$s ; \
+	done
+
+# install-asciidoc is only defined when manual sources exist; note we are
+# still inside the else branch of the conditional above.
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+	for s in $(MAN_SECTIONS); do \
+		mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
+		install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+	done
+endif
+
+distclean:: distclean-asciidoc
+
+distclean-asciidoc:
+	$(gen_verbose) rm -rf doc/html/ doc/guide.pdf doc/man3/ doc/man7/
+
+# Copyright (c) 2014-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Bootstrap plugin: generates project skeletons (application, library,
+# release) and new modules from the templates defined below.
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+	$(verbose) printf "%s\n" "" \
+		"Bootstrap targets:" \
+		"  bootstrap          Generate a skeleton of an OTP application" \
+		"  bootstrap-lib      Generate a skeleton of an OTP library" \
+		"  bootstrap-rel      Generate the files needed to build a release" \
+		"  new-app n=NAME     Create a new local OTP application NAME" \
+		"  new-lib n=NAME     Create a new local OTP library NAME" \
+		"  new t=TPL n=NAME   Generate a module NAME based on the template TPL" \
+		"  new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+		"  list-templates     List available templates"
+
+# Bootstrap templates.
+
+# .app.src skeleton for an application ($p is the project name set by the
+# bootstrap targets; includes a mod entry so the application is started).
+define bs_appsrc
+{application, $p, [
+	{description, ""},
+	{vsn, "0.1.0"},
+	{id, "git"},
+	{modules, []},
+	{registered, []},
+	{applications, [
+		kernel,
+		stdlib
+	]},
+	{mod, {$p_app, []}},
+	{env, []}
+]}.
+endef
+
+# .app.src skeleton for a library (no mod entry: nothing to start).
+define bs_appsrc_lib
+{application, $p, [
+	{description, ""},
+	{vsn, "0.1.0"},
+	{id, "git"},
+	{modules, []},
+	{registered, []},
+	{applications, [
+		kernel,
+		stdlib
+	]}
+]}.
+endef
+
+# Generated top-level Makefile; when SP is set, it is recorded in the new
+# Makefile so files created later use the same whitespace setting.
+ifdef SP
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+
+include erlang.mk
+endef
+else
+define bs_Makefile
+PROJECT = $p
+include erlang.mk
+endef
+endif
+
+# Makefile for an application living under APPS_DIR.
+define bs_apps_Makefile
+PROJECT = $p
+include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
+endef
+
+# application behaviour skeleton.
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+	$p_sup:start_link().
+
+stop(_State) ->
+	ok.
+endef
+
+# relx configuration skeleton.
+define bs_relx_config
+{release, {$p_release, "1"}, [$p]}.
+{extended_start_script, true}.
+{sys_config, "rel/sys.config"}.
+{vm_args, "rel/vm.args"}.
+endef
+
+# Empty sys.config skeleton.
+define bs_sys_config
+[
+].
+endef
+
+# vm.args skeleton.
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+# supervisor behaviour skeleton ($(n) is the module name from `new`).
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+	supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+	Procs = [],
+	{ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+# gen_server behaviour skeleton.
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+	gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+	{ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+	{reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+	{noreply, State}.
+
+handle_info(_Info, State) ->
+	{noreply, State}.
+
+terminate(_Reason, _State) ->
+	ok.
+
+code_change(_OldVsn, State, _Extra) ->
+	{ok, State}.
+endef
+
+# cowboy HTTP handler skeleton.
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+	{ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+	{ok, Req2} = cowboy_req:reply(200, Req),
+	{ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+	ok.
+endef
+
+# gen_fsm behaviour skeleton.
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+	gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+	{ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+	{next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+	{next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+	{reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+	{reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+	{next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+	ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+	{ok, StateName, StateData}.
+endef
+
+# cowboy loop handler skeleton.
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+	{loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+	{loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+	ok.
+endef
+
+# cowboy REST handler skeleton.
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+	{upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+	{[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+	{<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+# cowboy websocket handler skeleton.
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+	{upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+	Req2 = cowboy_req:compact(Req),
+	{ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+	{reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+	{reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+	{ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+	{ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+	ok.
+endef
+
+# ranch protocol skeleton.
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+	socket :: inet:socket(),
+	transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+	Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+	{ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+	ok = ranch:accept_ack(Ref),
+	loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+	loop(State).
+endef
+
+# Plugin-specific targets.
+
+# render_template(name, outfile): expand template variable $(1) and write
+# it to file $(2). Tabs become $(WS); single quotes and printf '%' are
+# escaped; newlines are turned into literal \n for printf.
+define render_template
+	$(verbose) printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# WS is the whitespace substituted for each template tab: $(SP) spaces
+# when SP is set (the subst/wordlist trick yields SP space characters),
+# otherwise a tab.
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+	$(error Error: src/ directory already exists)
+endif
+	$(eval p := $(PROJECT))
+	$(eval n := $(PROJECT)_sup)
+	$(call render_template,bs_Makefile,Makefile)
+	$(verbose) mkdir src/
+ifdef LEGACY
+	$(call render_template,bs_appsrc,src/$(PROJECT).app.src)
+endif
+	$(call render_template,bs_app,src/$(PROJECT)_app.erl)
+	$(call render_template,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+	$(error Error: src/ directory already exists)
+endif
+	$(eval p := $(PROJECT))
+	$(call render_template,bs_Makefile,Makefile)
+	$(verbose) mkdir src/
+ifdef LEGACY
+	$(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+	$(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard rel/),)
+	$(error Error: rel/ directory already exists)
+endif
+	$(eval p := $(PROJECT))
+	$(call render_template,bs_relx_config,relx.config)
+	$(verbose) mkdir rel/
+	$(call render_template,bs_sys_config,rel/sys.config)
+	$(call render_template,bs_vm_args,rel/vm.args)
+
+new-app:
+ifndef in
+	$(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+	$(error Error: Application $in already exists)
+endif
+	$(eval p := $(in))
+	$(eval n := $(in)_sup)
+	$(verbose) mkdir -p $(APPS_DIR)/$p/src/
+	$(call render_template,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+	$(call render_template,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+	$(call render_template,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+	$(call render_template,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+	$(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+	$(error Error: Application $in already exists)
+endif
+	$(eval p := $(in))
+	$(verbose) mkdir -p $(APPS_DIR)/$p/src/
+	$(call render_template,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+	$(call render_template,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+# new t=TPL n=NAME [in=APP]: render one template; with in= set, recurse
+# into the application's own Makefile.
+new:
+ifeq ($(wildcard src/)$(in),)
+	$(error Error: src/ directory does not exist)
+endif
+ifndef t
+	$(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef tpl_$(t)
+	$(error Unknown template)
+endif
+ifndef n
+	$(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+	$(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new t=$t n=$n in=
+else
+	$(call render_template,tpl_$(t),src/$(n).erl)
+endif
+
+# Template names are discovered from the tpl_* variables defined above.
+list-templates:
+	$(verbose) echo Available templates: $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# C source plugin: builds NIF/driver code from c_src/, either by
+# delegating to c_src/Makefile or by compiling the sources directly.
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),darwin)
+	CC ?= cc
+	CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
+	CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
+	LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+	CC ?= cc
+	CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+	CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+	CC ?= gcc
+	CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+	CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+
+LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C     " $(?F);
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP   " $(?F);
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD    " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+# c_src has its own Makefile: delegate build and clean to it.
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+	$(MAKE) -C $(C_SRC_DIR)
+
+clean::
+	$(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+# No c_src Makefile: compile *.c/*.C/*.cc/*.cpp ourselves.
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+
+$(C_SRC_OUTPUT): $(OBJECTS)
+	$(verbose) mkdir -p priv/
+	$(link_verbose) $(CC) $(OBJECTS) \
+		$(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+		-o $(C_SRC_OUTPUT)
+
+%.o: %.c
+	$(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+	$(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+	$(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+	$(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+	$(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+
+endif
+
+# Ask the Erlang VM for its include/lib paths and cache them in env.mk,
+# which is then included below.
+ifneq ($(wildcard $(C_SRC_DIR)),)
+$(C_SRC_ENV):
+	$(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+		io_lib:format( \
+			\"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+			\"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+			\"ERL_INTERFACE_LIB_DIR ?= ~s~n\", \
+			[code:root_dir(), erlang:system_info(version), \
+			code:lib_dir(erl_interface, include), \
+			code:lib_dir(erl_interface, lib)])), \
+		halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+	$(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+endif
+
+# Templates.
+
+# C skeleton for a NIF with load/upgrade/unload housekeeping.
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+	/* Initialize private data. */
+	*priv_data = NULL;
+
+	loads++;
+
+	return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+	/* Convert the private data to the new version. */
+	*priv_data = *old_priv_data;
+
+	loads++;
+
+	return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+	if (loads == 1) {
+		/* Destroy the private data. */
+	}
+
+	loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+	if (enif_is_atom(env, argv[0])) {
+		return enif_make_tuple2(env,
+			enif_make_atom(env, "hello"),
+			argv[0]);
+	}
+
+	return enif_make_tuple2(env,
+		enif_make_atom(env, "error"),
+		enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+	{"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+# Erlang side of the NIF skeleton.
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+	PrivDir = case code:priv_dir(?MODULE) of
+		{error, _} ->
+			AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+			filename:join(AppPath, "priv");
+		Path ->
+			Path
+	end,
+	erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+	erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+# new-nif n=NAME [in=APP]: render the C and Erlang NIF skeletons.
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+	$(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+	$(error Error: src/$n.erl already exists)
+endif
+ifdef in
+	$(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+	$(verbose) mkdir -p $(C_SRC_DIR) src/
+	$(call render_template,bs_c_nif,$(C_SRC_DIR)/$n.c)
+	$(call render_template,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# CI plugin: uses kerl to build/install each Erlang/OTP version listed in
+# CI_OTP, then runs the test suite once per installed version.
+.PHONY: ci ci-setup distclean-kerl
+
+KERL ?= $(CURDIR)/kerl
+export KERL
+
+KERL_URL ?= https://raw.githubusercontent.com/yrashk/kerl/master/kerl
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+CI_INSTALL_DIR ?= $(HOME)/erlang
+CI_OTP ?=
+
+ifeq ($(strip $(CI_OTP)),)
+# Nothing configured: ci is a no-op.
+ci::
+else
+ci:: $(addprefix ci-,$(CI_OTP))
+
+ci-prepare: $(addprefix $(CI_INSTALL_DIR)/,$(CI_OTP))
+
+ci-setup::
+
+ci_verbose_0 = @echo " CI    " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+# ci-VERSION: run `make clean ci-setup tests` with that OTP install
+# first in PATH.
+define ci_target
+ci-$(1): $(CI_INSTALL_DIR)/$(1)
+	$(ci_verbose) \
+		PATH="$(CI_INSTALL_DIR)/$(1)/bin:$(PATH)" \
+		CI_OTP_RELEASE="$(1)" \
+		CT_OPTS="-label $(1)" \
+		$(MAKE) clean ci-setup tests
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp))))
+
+# Build and install an OTP version with kerl unless already present.
+define ci_otp_target
+ifeq ($(wildcard $(CI_INSTALL_DIR)/$(1)),)
+$(CI_INSTALL_DIR)/$(1): $(KERL)
+	$(KERL) build git $(OTP_GIT) $(1) $(1)
+	$(KERL) install $(1) $(CI_INSTALL_DIR)/$(1)
+endif
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_otp_target,$(otp))))
+
+$(KERL):
+	$(gen_verbose) $(call core_http_get,$(KERL),$(KERL_URL))
+	$(verbose) chmod +x $(KERL)
+
+help::
+	$(verbose) printf "%s\n" "" \
+		"Continuous Integration targets:" \
+		"  ci          Run '$(MAKE) tests' on all configured Erlang versions." \
+		"" \
+		"The CI_OTP variable must be defined with the Erlang versions" \
+		"that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+	$(gen_verbose) rm -rf $(KERL)
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Common Test plugin: discovers *_SUITE.erl files under TEST_DIR and runs
+# them with ct_run; each suite also gets its own ct-NAME target.
+.PHONY: ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+# Suite names are the *_SUITE.erl basenames minus the suffix.
+ifneq ($(wildcard $(TEST_DIR)),)
+	CT_SUITES ?= $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+else
+	CT_SUITES ?=
+endif
+
+# Core targets.
+
+tests:: ct
+
+distclean:: distclean-ct
+
+# NOTE(review): help text previously read "can be ran"; fixed to
+# "can be run".
+help::
+	$(verbose) printf "%s\n" "" \
+		"Common_test targets:" \
+		"  ct          Run all the common_test suites for this project" \
+		"" \
+		"All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+	-no_auto_compile \
+	-noinput \
+	-pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(TEST_DIR) \
+	-dir $(TEST_DIR) \
+	-logdir $(CURDIR)/logs
+
+ifeq ($(CT_SUITES),)
+# No suites found: ct is a no-op.
+ct:
+else
+ct: test-build
+	$(verbose) mkdir -p $(CURDIR)/logs/
+	$(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+
+# Per-suite target: ct-NAME runs just that suite.
+define ct_suite_target
+ct-$(1): test-build
+	$(verbose) mkdir -p $(CURDIR)/logs/
+	$(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+	$(gen_verbose) rm -rf $(CURDIR)/logs/
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Dialyzer plugin: builds a project-local PLT and analyzes the sources.
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r src
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
+	-Wunmatched_returns # -Wunderspecs
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+	$(verbose) printf "%s\n" "" \
+		"Dialyzer targets:" \
+		"  plt         Build a PLT file for this project" \
+		"  dialyze     Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+$(DIALYZER_PLT): deps app
+	$(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+	$(gen_verbose) rm -f $(DIALYZER_PLT)
+
+# The recipe after the conditional belongs to whichever dialyze rule was
+# selected: no prerequisite when the PLT already exists, otherwise the
+# PLT is built first.
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze:
+else
+dialyze: $(DIALYZER_PLT)
+endif
+	$(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# EDoc plugin: regenerates the EDoc HTML for the application.
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+
+# Core targets.
+
+docs:: distclean-edoc edoc
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: doc-deps
+	$(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().'
+
+distclean-edoc:
+	$(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info
+
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Elvis plugin: style-checks the sources with the elvis tool, downloading
+# the binary and a default configuration when absent.
+.PHONY: elvis distclean-elvis
+
+# Configuration.
+
+ELVIS_CONFIG ?= $(CURDIR)/elvis.config
+
+ELVIS ?= $(CURDIR)/elvis
+export ELVIS
+
+ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
+ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
+ELVIS_OPTS ?=
+
+# Core targets.
+
+help::
+	$(verbose) printf "%s\n" "" \
+		"Elvis targets:" \
+		"  elvis       Run Elvis using the local elvis.config or download the default otherwise"
+
+distclean:: distclean-elvis
+
+# Plugin-specific targets.
+
+$(ELVIS):
+	$(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
+	$(verbose) chmod +x $(ELVIS)
+
+$(ELVIS_CONFIG):
+	$(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
+
+elvis: $(ELVIS) $(ELVIS_CONFIG)
+	$(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
+
+distclean-elvis:
+	$(gen_verbose) rm -rf $(ELVIS)
+
+# Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Escript plugin: packages the project's beams (plus static files) into a
+# single self-executing escript archive.
+.PHONY: distclean-escript escript
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+
+ESCRIPT_BEAMS ?= "ebin/*", "deps/*/ebin/*"
+ESCRIPT_SYS_CONFIG ?= "rel/sys.config"
+ESCRIPT_EMU_ARGS ?= -pa . \
+	-sasl errlog_type error \
+	-escript main $(ESCRIPT_NAME)
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_STATIC ?= "deps/*/priv/**", "priv/**"
+
+# Core targets.
+
+distclean:: distclean-escript
+
+# NOTE(review): the last help line previously ended with a stray trailing
+# backslash, continuing the shell command into the following blank line;
+# removed.
+help::
+	$(verbose) printf "%s\n" "" \
+		"Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+# Based on https://github.com/synrc/mad/blob/master/src/mad_bundle.erl
+# Copyright (c) 2013 Maxim Sokhatsky, Synrc Research Center
+# Modified MIT License, https://github.com/synrc/mad/blob/master/LICENSE :
+# Software may only be used for the great good and the true happiness of all
+# sentient beings.
+
+# Erlang one-liner (as shell-quoted fragments joined below) that zips the
+# static files, collects the beams and calls escript:create/2.
+define ESCRIPT_RAW
+'Read = fun(F) -> {ok, B} = file:read_file(filename:absname(F)), B end,'\
+'Files = fun(L) -> A = lists:concat([filelib:wildcard(X)||X<- L ]),'\
+'  [F || F <- A, not filelib:is_dir(F) ] end,'\
+'Squash = fun(L) -> [{filename:basename(F), Read(F) } || F <- L ] end,'\
+'Zip = fun(A, L) -> {ok,{_,Z}} = zip:create(A, L, [{compress,all},memory]), Z end,'\
+'Ez = fun(Escript) ->'\
+'  Static = Files([$(ESCRIPT_STATIC)]),'\
+'  Beams = Squash(Files([$(ESCRIPT_BEAMS), $(ESCRIPT_SYS_CONFIG)])),'\
+'  Archive = Beams ++ [{ "static.gz", Zip("static.gz", Static)}],'\
+'  escript:create(Escript, [ $(ESCRIPT_OPTIONS)'\
+'    {archive, Archive, [memory]},'\
+'    {shebang, "$(ESCRIPT_SHEBANG)"},'\
+'    {comment, "$(ESCRIPT_COMMENT)"},'\
+'    {emu_args, " $(ESCRIPT_EMU_ARGS)"}'\
+'  ]),'\
+'  file:change_mode(Escript, 8#755)'\
+'end,'\
+'Ez("$(ESCRIPT_NAME)"),'\
+'halt().'
+endef
+
+ESCRIPT_COMMAND = $(subst ' ',,$(ESCRIPT_RAW))
+
+escript:: distclean-escript deps app
+	$(gen_verbose) $(ERL) -eval $(ESCRIPT_COMMAND)
+
+distclean-escript:
+	$(gen_verbose) rm -f $(ESCRIPT_NAME)
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Relx plugin: downloads the relx release tool, builds a release from
+# relx.config and provides a `run` target that boots it in a console.
+.PHONY: relx-rel distclean-relx-rel distclean-relx run
+
+# Configuration.
+
+RELX ?= $(CURDIR)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://github.com/erlware/relx/releases/download/v3.5.0/relx
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+
+# Honor a user-supplied leading -o in RELX_OPTS; otherwise append ours.
+ifeq ($(firstword $(RELX_OPTS)),-o)
+	RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+	RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+endif
+endif
+
+distclean:: distclean-relx-rel distclean-relx
+
+# Plugin-specific targets.
+
+$(RELX):
+	$(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+	$(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+	$(verbose) $(RELX) -c $(RELX_CONFIG) $(RELX_OPTS)
+
+distclean-relx-rel:
+	$(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+distclean-relx:
+	$(gen_verbose) rm -rf $(RELX)
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+# No relx.config: run is a no-op.
+run:
+else
+
+# Extract the release name from relx.config via an Erlang one-liner.
+define get_relx_release.erl
+	{ok, Config} = file:consult("$(RELX_CONFIG)"),
+	{release, {Name, _}, _} = lists:keyfind(release, 1, Config),
+	io:format("~s", [Name]),
+	halt(0).
+endef
+
+RELX_RELEASE = `$(call erlang,$(get_relx_release.erl))`
+
+run: all
+	$(verbose) $(RELX_OUTPUT_DIR)/$(RELX_RELEASE)/bin/$(RELX_RELEASE) console
+
+help::
+	$(verbose) printf "%s\n" "" \
+		"Relx targets:" \
+		"  run         Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(APPS_DIR)/*/ebin $(DEPS_DIR)/*/ebin
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Shell targets:" \
+               "  shell       Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+       $(verbose) for dep in $(ALL_SHELL_DEPS_DIRS) ; do $(MAKE) -C $$dep ; done
+
+shell: build-shell-deps
+       $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+       code:add_pathsa(["$(CURDIR)/ebin", "$(DEPS_DIR)/*/ebin"]),
+       try
+               case $(1) of
+                       all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+                       module -> triq:check($(2));
+                       function -> triq:check($(2))
+               end
+       of
+               true -> halt(0);
+               _ -> halt(1)
+       catch error:undef ->
+               io:format("Undefined property or module~n"),
+               halt(0)
+       end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build
+       $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build
+       $(verbose) echo Testing $(t)/0
+       $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build
+       $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename $(wildcard ebin/*.beam))))))
+       $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+       XREF_ARGS :=
+else
+       XREF_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/0.2.2/xrefr
+
+# Core targets.
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Xref targets:" \
+               "  xref        Run Xrefr using $XREF_CONFIG as config file if defined"
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+       $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+       $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+       $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+       $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR = cover
+
+# Hook in coverage to ct
+
+ifdef COVER
+ifdef CT_RUN
+# All modules in 'ebin'
+COVER_MODS = $(notdir $(basename $(call core_ls,ebin/*.beam)))
+
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec:
+       $(verbose) echo Cover mods: $(COVER_MODS)
+       $(gen_verbose) printf "%s\n" \
+               '{incl_mods,[$(subst $(space),$(comma),$(COVER_MODS))]}.' \
+               '{export,"$(CURDIR)/ct.coverdata"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+       $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Cover targets:" \
+               "  cover-report  Generate a HTML coverage report from previously collected" \
+               "                cover data." \
+               "  all.coverdata Merge {eunit,ct}.coverdata into one coverdata file." \
+               "" \
+               "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+               "target tests additionally generates a HTML coverage report from the combined" \
+               "coverdata files from each of these testing tools. HTML reports can be disabled" \
+               "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out all.coverdata,$(wildcard *.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+       $(gen_verbose) rm -f *.coverdata ct.cover.spec
+
+# Merge all coverdata files into one.
+all.coverdata: $(COVERDATA)
+       $(gen_verbose) $(ERL) -eval ' \
+               $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),) \
+               cover:export("$@"), halt(0).'
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+       $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+       grep -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+       | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+       $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+       Ms = cover:imported_modules(),
+       [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+               ++ ".COVER.html", [html])  || M <- Ms],
+       Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+       EunitHrlMods = [$(EUNIT_HRL_MODS)],
+       Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+               true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+       TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+       TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+       TotalPerc = round(100 * TotalY / (TotalY + TotalN)),
+       {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+       io:format(F, "<!DOCTYPE html><html>~n"
+               "<head><meta charset=\"UTF-8\">~n"
+               "<title>Coverage report</title></head>~n"
+               "<body>~n", []),
+       io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+       io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+       [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+               "<td>~p%</td></tr>~n",
+               [M, M, round(100 * Y / (Y + N))]) || {M, {Y, N}} <- Report1],
+       How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+       Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+       io:format(F, "</table>~n"
+               "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+               "</body></html>", [How, Date]),
+       halt().
+endef
+
+cover-report:
+       $(gen_verbose) mkdir -p $(COVER_REPORT_DIR)
+       $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+       fetch-shell-deps
+
+ifneq ($(SKIP_DEPS),)
+fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps fetch-shell-deps:
+       @:
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+fetch-deps: $(ALL_DEPS_DIRS)
+fetch-doc-deps: $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+fetch-rel-deps: $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+fetch-test-deps: $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+fetch-shell-deps: $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+fetch-deps: $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+fetch-deps: $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+fetch-deps: $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+fetch-deps: $(ALL_SHELL_DEPS_DIRS)
+endif
+
+fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps fetch-shell-deps:
+ifndef IS_APP
+       $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+               $(MAKE) -C $$dep $@ IS_APP=1 || exit $$?; \
+       done
+endif
+ifneq ($(IS_DEP),1)
+       $(verbose) rm -f $(ERLANG_MK_TMP)/$@.log
+endif
+       $(verbose) mkdir -p $(ERLANG_MK_TMP)
+       $(verbose) for dep in $^ ; do \
+               if ! grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/$@.log; then \
+                       echo $$dep >> $(ERLANG_MK_TMP)/$@.log; \
+                       if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk)$$" \
+                        $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+                               $(MAKE) -C $$dep fetch-deps IS_DEP=1 || exit $$?; \
+                       fi \
+               fi \
+       done
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+       list-shell-deps
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+       $(verbose) :> $@
+else
+LIST_DIRS = $(ALL_DEPS_DIRS)
+LIST_DEPS = $(BUILD_DEPS) $(DEPS)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): fetch-deps
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): LIST_DIRS += $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): LIST_DEPS += $(DOC_DEPS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): fetch-doc-deps
+else
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): LIST_DIRS += $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): LIST_DEPS += $(REL_DEPS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): fetch-rel-deps
+else
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): LIST_DIRS += $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): LIST_DEPS += $(TEST_DEPS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): fetch-test-deps
+else
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): LIST_DIRS += $(ALL_SHELL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): LIST_DEPS += $(SHELL_DEPS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): fetch-shell-deps
+else
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): fetch-deps
+endif
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ifneq ($(IS_DEP),1)
+       $(verbose) rm -f $@.orig
+endif
+ifndef IS_APP
+       $(verbose) for app in $(filter-out $(CURDIR),$(ALL_APPS_DIRS)); do \
+               $(MAKE) -C "$$app" --no-print-directory $@ IS_APP=1 || :; \
+       done
+endif
+       $(verbose) for dep in $(filter-out $(CURDIR),$(LIST_DIRS)); do \
+               if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk)$$" \
+                $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+                       $(MAKE) -C "$$dep" --no-print-directory $@ IS_DEP=1; \
+               fi; \
+       done
+       $(verbose) for dep in $(LIST_DEPS); do \
+               echo $(DEPS_DIR)/$$dep; \
+       done >> $@.orig
+ifndef IS_APP
+ifneq ($(IS_DEP),1)
+       $(verbose) sort < $@.orig | uniq > $@
+       $(verbose) rm -f $@.orig
+endif
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+ifneq ($(SKIP_DEPS),)
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+       @:
+else
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(IS_DEP),1)
+ifneq ($(filter doc,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+endif
+endif
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+       $(verbose) cat $^ | sort | uniq
+endif # ifneq ($(SKIP_DEPS),)
diff --git a/rabbitmq-server/deps/rabbitmq_top/priv/www/js/tmpl/ets_tables.ejs b/rabbitmq-server/deps/rabbitmq_top/priv/www/js/tmpl/ets_tables.ejs
new file mode 100644 (file)
index 0000000..591e492
--- /dev/null
@@ -0,0 +1,52 @@
+<h1>Top ETS Tables: <b><%= top.node %></b></h1>
+
+<p>
+  Node:
+  <select id="top-node-ets">
+  <% for (var i = 0; i < nodes.length; i++) { %>
+     <option name="#/top/<%= fmt_string(nodes[i].name) %>"<% if (nodes[i].name == top.node) { %>selected="selected"<% } %>><%= nodes[i].name %></option>
+  <% } %>
+  </select>
+
+  Rows:
+  <select id="row-count-ets">
+  <%
+    var row_counts = [20, 50, 100, 150];
+    for (var i = 0; i < row_counts.length; i++) {
+  %>
+    <option name="<%= row_counts[i] %>"
+        <% if (row_counts[i] == top.row_count) { %>selected="selected"<% } %>>
+    <%= row_counts[i] %></option>
+  <% } %>
+  </select>
+</p>
+
+<table class="list updatable">
+ <thead>
+  <tr>
+    <th><%= fmt_sort('Name', 'name') %></th>
+    <th><%= fmt_sort('Owner Name', 'owner_name') %></th>
+    <th><%= fmt_sort('Memory', 'memory') %></th>
+    <th><%= fmt_sort('Size', 'size') %></th>
+    <th><%= fmt_sort('Type', 'type') %></th>
+    <th>Protection</th>
+    <th>Compressed</th>
+  </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < top.ets_tables.length; i++) {
+    var table = top.ets_tables[i];
+%>
+  <tr<%= alt_rows(i)%>>
+    <td><%= fmt_string(table.name) %></td>
+    <td><%= fmt_string(table.owner_name) %></td>
+    <td><%= fmt_bytes(table.memory * 1.0) %></td>
+    <td><%= table.size %></td>
+    <td><%= table.type %></td>
+    <td><%= table.protection %></td>
+    <td><%= table.compressed %></td>
+  </tr>
+<% } %>
+ </tbody>
+</table>
diff --git a/rabbitmq-server/deps/rabbitmq_top/priv/www/js/tmpl/process.ejs b/rabbitmq-server/deps/rabbitmq_top/priv/www/js/tmpl/process.ejs
new file mode 100644 (file)
index 0000000..a567c6e
--- /dev/null
@@ -0,0 +1,54 @@
+<h1>Process: <b><%= fmt_string(process.pid) %></b></h1>
+
+<div class="updatable">
+  <table class="facts">
+    <tr>
+      <th>Type</th>
+      <td><%= fmt_remove_rabbit_prefix(process.name.type) %></td>
+    </tr>
+    <tr>
+      <th>Description</th>
+      <td><%= fmt_process_name(process) %></td>
+    </tr>
+    <tr>
+      <th>Memory</th>
+      <td><%= fmt_bytes(process.memory) %></td>
+    </tr>
+    <tr>
+      <th>Message queue length</th>
+      <td><%= process.message_queue_len %></td>
+    </tr>
+    <tr>
+      <th>Reductions / sec</th>
+      <td><%= fmt_reduction_delta(process.reduction_delta) %></td>
+    </tr>
+    <tr>
+      <th>Total reductions</th>
+      <td><%= process.reductions %></td>
+    </tr>
+    <tr>
+      <th>Status</th>
+      <td><%= process.status %></td>
+    </tr>
+    <tr>
+      <th>Trap exit</th>
+      <td><%= fmt_boolean(process.trap_exit) %></td>
+    </tr>
+    <tr>
+      <th>Links</th>
+      <td><%= fmt_pids(process.links) %></td>
+    </tr>
+    <tr>
+      <th>Monitors</th>
+      <td><%= fmt_pids(process.monitors) %></td>
+    </tr>
+    <tr>
+      <th>Monitored by</th>
+      <td><%= fmt_pids(process.monitored_by) %></td>
+    </tr>
+    <tr>
+      <th>Current stacktrace</th>
+      <td><pre><%= fmt_string(process.current_stacktrace) %></pre></td>
+    </tr>
+  </table>
+</div>
diff --git a/rabbitmq-server/deps/rabbitmq_top/priv/www/js/tmpl/processes.ejs b/rabbitmq-server/deps/rabbitmq_top/priv/www/js/tmpl/processes.ejs
new file mode 100644 (file)
index 0000000..ed863f2
--- /dev/null
@@ -0,0 +1,52 @@
+<h1>Top Processes: <b><%= top.node %></b></h1>
+
+<p>
+  Node:
+  <select id="top-node">
+  <% for (var i = 0; i < nodes.length; i++) { %>
+     <option name="#/top/<%= fmt_string(nodes[i].name) %>"<% if (nodes[i].name == top.node) { %>selected="selected"<% } %>><%= nodes[i].name %></option>
+  <% } %>
+  </select>
+
+  Rows:
+  <select id="row-count">
+  <%
+    var row_counts = [20, 50, 100, 150];
+    for (var i = 0; i < row_counts.length; i++) {
+  %>
+    <option name="<%= row_counts[i] %>"
+        <% if (row_counts[i] == top.row_count) { %>selected="selected"<% } %>>
+    <%= row_counts[i] %></option>
+  <% } %>
+  </select>
+</p>
+
+<table class="list updatable">
+ <thead>
+  <tr>
+    <th><%= fmt_sort('Process', 'pid') %></th>
+    <th>Description</th>
+    <th>Type</th>
+    <th><%= fmt_sort('Memory', 'memory') %></th>
+    <th><%= fmt_sort('Reductions / sec', 'reduction_delta') %></th>
+    <th>Message queue</th>
+    <th>Status</th>
+  </tr>
+ </thead>
+ <tbody>
+<%
+ for (var i = 0; i < top.processes.length; i++) {
+    var process = top.processes[i];
+%>
+  <tr<%= alt_rows(i)%>>
+    <td><%= link_pid(process.pid) %></td>
+    <td><%= fmt_process_name(process) %></td>
+    <td><%= fmt_remove_rabbit_prefix(process.name.type) %></td>
+    <td><%= fmt_bytes(process.memory * 1.0) %></td>
+    <td class="r"><%= fmt_reduction_delta(process.reduction_delta) %></td>
+    <td class="r"><%= process.message_queue_len %></td>
+    <td><%= process.status %></td>
+  </tr>
+<% } %>
+ </tbody>
+</table>
diff --git a/rabbitmq-server/deps/rabbitmq_top/priv/www/js/top.js b/rabbitmq-server/deps/rabbitmq_top/priv/www/js/top.js
new file mode 100644 (file)
index 0000000..c5c4225
--- /dev/null
@@ -0,0 +1,96 @@
+dispatcher_add(function(sammy) {
+    sammy.get('#/top', function() {
+            var nodes = JSON.parse(sync_get('/nodes'));
+            go_to('#/top/' + nodes[0].name + "/20");
+        });
+    sammy.get('#/top/ets', function() {
+            var nodes = JSON.parse(sync_get('/nodes'));
+            go_to('#/top/ets/' + nodes[0].name + "/20");
+        });
+    sammy.get('#/top/:node/:row_count', function() {
+            render({'top':   {path:    '/top/' + esc(this.params['node']),
+                              options: {sort: true,
+                                        row_count: this.params['row_count']}},
+                    'nodes': '/nodes'},
+                    'processes', '#/top');
+        });
+    sammy.get('#/top/ets/:node/:row_count', function() {
+            render({'top': {path:    '/top/ets/' + esc(this.params['node']),
+                            options: {sort: true,
+                                      row_count: this.params['row_count']}},
+                    'nodes': '/nodes'},
+                    'ets_tables', '#/top/ets');
+        });
+    sammy.get('#/process/:pid', function() {
+            render({'process': '/process/' + esc(this.params['pid'])},
+                    'process', '#/top');
+        });
+});
+
+NAVIGATION['Admin'][0]['Top Processes']  = ['#/top', 'administrator'];
+NAVIGATION['Admin'][0]['Top ETS Tables'] = ['#/top/ets', 'administrator'];
+
+$('select#top-node').live('change', function() {
+    go_to('#/top/' + $(this).val());
+});
+
+$('select#top-node-ets').live('change', function() {
+    go_to('#/top/ets' + $(this).val());
+});
+
+$('select#row-count').live('change', function() {
+    go_to('#/top/' + $('select#top-node').val() + "/" + $(this).val());
+});
+
+$('select#row-count-ets').live('change', function() {
+    go_to('#/top/ets/' + $('select#top-node-ets').val() + "/" + $(this).val());
+});
+
+function link_pid(name) {
+    return _link_to(name, '#/process/' + esc(name));
+}
+
+function fmt_process_name(process) {
+    if (process == undefined) return '';
+    var name = process.name;
+
+    if (name.supertype != undefined) {
+        if (name.supertype == 'channel') {
+            return link_channel(name.connection_name + ' (' +
+                                name.channel_number + ')');
+        }
+        else if (name.supertype == 'queue') {
+            return link_queue(name.vhost, name.queue_name);
+        }
+        else if (name.supertype == 'connection') {
+            return link_conn(name.connection_name);
+        }
+    }
+    else {
+        return '<b>' + name.name + '</b>';
+    }
+}
+
+function fmt_remove_rabbit_prefix(name) {
+    if (name == 'rabbit_amqqueue_process') return 'queue';
+
+    if (name.substring(0, 7) == 'rabbit_') {
+        return name.substring(7);
+    }
+    else {
+        return name;
+    }
+}
+
+function fmt_pids(pids) {
+    var txt = '';
+    for (var i = 0; i < pids.length; i++) {
+        txt += link_pid(pids[i]) + ' ';
+    }
+
+    return txt;
+}
+
+function fmt_reduction_delta(delta) {
+    return Math.round(delta / 5); // gen_server updates every 5s
+}
diff --git a/rabbitmq-server/deps/rabbitmq_top/rabbitmq-components.mk b/rabbitmq-server/deps/rabbitmq_top/rabbitmq-components.mk
new file mode 100644 (file)
index 0000000..eb9e9e3
--- /dev/null
@@ -0,0 +1,345 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# Automatically add rabbitmq-common to the dependencies, at least for
+# the Makefiles.
+ifneq ($(PROJECT),rabbit_common)
+ifneq ($(PROJECT),rabbitmq_public_umbrella)
+ifeq ($(filter rabbit_common,$(DEPS)),)
+DEPS += rabbit_common
+endif
+endif
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+#
+# Each entry reads: git_rmq <repository name> <candidate refs...>; the
+# refs are tried in order by $(dep_fetch_git_rmq) further down.
+
+dep_amqp_client                       = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit                            = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common                     = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0                  = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp        = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http        = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap        = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl       = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser    = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_clusterer                = git_rmq rabbitmq-clusterer $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen                  = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client            = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange      = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes        = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management        = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
+dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella          = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# FIXME: As of 2015-11-20, we depend on Ranch 1.2.1, but erlang.mk
+# defaults to Ranch 1.1.0. All projects depending indirectly on Ranch
+# need to add "ranch" as a BUILD_DEPS. The list of projects needing
+# this workaround are:
+#     o  rabbitmq-web-stomp
+dep_ranch = git https://github.com/ninenines/ranch 1.2.1
+
+RABBITMQ_COMPONENTS = amqp_client \
+                     rabbit \
+                     rabbit_common \
+                     rabbitmq_amqp1_0 \
+                     rabbitmq_auth_backend_amqp \
+                     rabbitmq_auth_backend_http \
+                     rabbitmq_auth_backend_ldap \
+                     rabbitmq_auth_mechanism_ssl \
+                     rabbitmq_boot_steps_visualiser \
+                     rabbitmq_clusterer \
+                     rabbitmq_codegen \
+                     rabbitmq_consistent_hash_exchange \
+                     rabbitmq_delayed_message_exchange \
+                     rabbitmq_dotnet_client \
+                     rabbitmq_event_exchange \
+                     rabbitmq_federation \
+                     rabbitmq_federation_management \
+                     rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
+                     rabbitmq_lvc \
+                     rabbitmq_management \
+                     rabbitmq_management_agent \
+                     rabbitmq_management_exchange \
+                     rabbitmq_management_themes \
+                     rabbitmq_management_visualiser \
+                     rabbitmq_message_timestamp \
+                     rabbitmq_metronome \
+                     rabbitmq_mqtt \
+                     rabbitmq_objc_client \
+                     rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
+                     rabbitmq_rtopic_exchange \
+                     rabbitmq_sharding \
+                     rabbitmq_shovel \
+                     rabbitmq_shovel_management \
+                     rabbitmq_stomp \
+                     rabbitmq_test \
+                     rabbitmq_toke \
+                     rabbitmq_top \
+                     rabbitmq_tracing \
+                     rabbitmq_trust_store \
+                     rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
+                     rabbitmq_web_stomp \
+                     rabbitmq_web_stomp_examples \
+                     rabbitmq_website
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+# Ref of the current checkout: the checked-out branch name, a detached
+# ref when one can be resolved, or `master` outside a Git clone.
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+       ref=$$(git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+       if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+# Fallback ref: `stable` when HEAD's merge-base with master is an
+# ancestor of stable, otherwise `master`.
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+base_rmq_ref := $(shell \
+       (git rev-parse --verify -q stable >/dev/null && \
+         git merge-base --is-ancestor $$(git merge-base master HEAD) stable && \
+         echo stable) || \
+       echo master)
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+#   - We take the "origin" remote URL as the base
+#   - The current project name and repository name are replaced by the
+#     target's properties:
+#       eg. rabbitmq-common is replaced by rabbitmq-codegen
+#       eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+#   1. /foo.git -> /bar.git
+#   2. /foo     -> /bar
+#   3. /foo/    -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+# Candidate refs for a git_rmq dep: everything from the 3rd word of the
+# dep_* entry onwards (see the dep table above).
+dep_rmq_commits = $(if $(dep_$(1)),                                    \
+                 $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))),       \
+                 $(pkg_$(1)_commit))
+
+# Fetch method `git_rmq`: try the URL derived from this clone's origin
+# first, then upstream GitHub; then check out the first candidate ref
+# that exists and point the push URL at the matching remote.
+define dep_fetch_git_rmq
+       fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+       fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+       if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+        git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+           fetch_url="$$$$fetch_url1"; \
+           push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+       elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+           fetch_url="$$$$fetch_url2"; \
+           push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+       fi; \
+       cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+       $(foreach ref,$(call dep_rmq_commits,$(1)), \
+         git checkout -q $(ref) >/dev/null 2>&1 || \
+         ) \
+       (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+         1>&2 && false) ) && \
+       (test "$$$$fetch_url" = "$$$$push_url" || \
+        git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+# No-op hooks; components override these double-colon rules as needed.
+list-dist-deps::
+       @:
+
+prepare-dist::
+       @:
+
+# --------------------------------------------------------------------
+# Run a RabbitMQ node (moved from rabbitmq-run.mk as a workaround).
+# --------------------------------------------------------------------
+
+# Add "rabbit" to the build dependencies when the user wants to start
+# a broker or to the test dependencies when the user wants to test a
+# project.
+#
+# NOTE: This should belong to rabbitmq-run.mk. Unfortunately, it is
+# loaded *after* erlang.mk which is too late to add a dependency. That's
+# why rabbitmq-components.mk knows the list of targets which start a
+# broker and add "rabbit" to the dependencies in this case.
+
+ifneq ($(PROJECT),rabbit)
+ifeq ($(filter rabbit,$(DEPS) $(BUILD_DEPS)),)
+RUN_RMQ_TARGETS = run-broker \
+                 run-background-broker \
+                 run-node \
+                 run-background-node \
+                 start-background-node
+
+ifneq ($(filter $(RUN_RMQ_TARGETS),$(MAKECMDGOALS)),)
+BUILD_DEPS += rabbit
+endif
+endif
+
+ifeq ($(filter rabbit,$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)),)
+ifneq ($(filter check tests tests-with-broker test,$(MAKECMDGOALS)),)
+TEST_DEPS += rabbit
+endif
+endif
+endif
+
+# Likewise, pull in rabbitmq_test as a test dependency for every
+# component that does not already have it (or provide it itself).
+ifeq ($(filter rabbit_public_umbrella amqp_client rabbit_common rabbitmq_test,$(PROJECT)),)
+ifeq ($(filter rabbitmq_test,$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)),)
+TEST_DEPS += rabbitmq_test
+endif
+endif
+
+# --------------------------------------------------------------------
+# rabbitmq-components.mk checks.
+# --------------------------------------------------------------------
+
+ifeq ($(PROJECT),rabbit_common)
+else ifdef SKIP_RMQCOMP_CHECK
+else ifeq ($(IS_DEP),1)
+else ifneq ($(filter co up,$(MAKECMDGOALS)),)
+else
+# In all other cases, rabbitmq-components.mk must be in sync.
+deps:: check-rabbitmq-components.mk
+fetch-deps: check-rabbitmq-components.mk
+endif
+
+# If this project is under the Umbrella project, we override $(DEPS_DIR)
+# to point to the Umbrella's one. We also disable `make distclean` so
+# $(DEPS_DIR) is not accidentally removed.
+
+ifneq ($(wildcard ../../UMBRELLA.md),)
+UNDER_UMBRELLA = 1
+else ifneq ($(wildcard UMBRELLA.md),)
+UNDER_UMBRELLA = 1
+endif
+
+ifeq ($(UNDER_UMBRELLA),1)
+ifneq ($(PROJECT),rabbitmq_public_umbrella)
+DEPS_DIR ?= $(abspath ..)
+
+distclean:: distclean-components
+       @:
+
+distclean-components:
+endif
+
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
+
+UPSTREAM_RMQ_COMPONENTS_MK = $(DEPS_DIR)/rabbit_common/mk/rabbitmq-components.mk
+
+# Fail when the local copy of rabbitmq-components.mk drifts from the
+# one shipped in rabbit_common.
+check-rabbitmq-components.mk:
+       $(verbose) cmp -s rabbitmq-components.mk \
+               $(UPSTREAM_RMQ_COMPONENTS_MK) || \
+               (echo "error: rabbitmq-components.mk must be updated!" 1>&2; \
+                 false)
+
+# Refresh the local copy from rabbit_common (optionally committing it).
+ifeq ($(PROJECT),rabbit_common)
+rabbitmq-components-mk:
+       @:
+else
+rabbitmq-components-mk:
+       $(gen_verbose) cp -a $(UPSTREAM_RMQ_COMPONENTS_MK) .
+ifeq ($(DO_COMMIT),yes)
+       $(verbose) git diff --quiet rabbitmq-components.mk \
+       || git commit -m 'Update rabbitmq-components.mk' rabbitmq-components.mk
+endif
+endif
similarity index 60%
rename from rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_all.erl
rename to rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_app.erl
index 1f03958570a1a851fcbe357539f924ca0415e7df..a1e7e96f51c4b0e8e164426ff38337e81cf71232 100644 (file)
@@ -8,19 +8,19 @@
 %%   License for the specific language governing rights and limitations
 %%   under the License.
 %%
-%%   The Original Code is RabbitMQ Management Console.
+%%   The Original Code is RabbitMQ.
 %%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   The Initial Developer of the Original Code is VMware, Inc.
 %%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_ws_test_all).
+-module(rabbit_top_app).
 
--export([all_tests/0]).
+-behaviour(application).
+-export([start/2, stop/1]).
 
-all_tests() ->
-    ok = eunit:test(rabbit_ws_test_cowboy_websocket, [verbose]),
-    ok = eunit:test(rabbit_ws_test_raw_websocket, [verbose]),
-    ok = eunit:test(rabbit_ws_test_sockjs_websocket, [verbose]),
-    ok.
+start(_Type, _StartArgs) ->
+    rabbit_top_sup:start_link().
 
+stop(_State) ->
+    ok.
diff --git a/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_extension.erl b/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_extension.erl
new file mode 100644 (file)
index 0000000..b45b088
--- /dev/null
@@ -0,0 +1,26 @@
+%%  The contents of this file are subject to the Mozilla Public License
+%%  Version 1.1 (the "License"); you may not use this file except in
+%%  compliance with the License. You may obtain a copy of the License
+%%  at http://www.mozilla.org/MPL/
+%%
+%%  Software distributed under the License is distributed on an "AS IS"
+%%  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%%  the License for the specific language governing rights and
+%%  limitations under the License.
+%%
+%%  The Original Code is RabbitMQ.
+%%
+%%  The Initial Developer of the Original Code is VMware, Inc.
+%%  Copyright (c) 2007-2012 VMware, Inc.  All rights reserved.
+%%
+
+-module(rabbit_top_extension).
+
+-behaviour(rabbit_mgmt_extension).
+
+-export([dispatcher/0, web_ui/0]).
+
+%% HTTP routes contributed to the management API: per-node process and
+%% ETS-table listings, plus a per-process detail page.
+dispatcher() -> [{["top",        node], rabbit_top_wm_processes, []},
+                 {["top", "ets", node], rabbit_top_wm_ets_tables, []},
+                 {["process",    pid],  rabbit_top_wm_process, []}].
+%% JavaScript file injected into the management UI.
+web_ui()     -> [{javascript, <<"top.js">>}].
diff --git a/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_sup.erl b/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_sup.erl
new file mode 100644 (file)
index 0000000..f2603b2
--- /dev/null
@@ -0,0 +1,34 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ.
+%%
+%%   The Initial Developer of the Original Code is VMware, Inc.
+%%   Copyright (c) 2011-2012 VMware, Inc.  All rights reserved.
+%%
+
+%% Top-level supervisor of the rabbitmq-top plugin: runs a single
+%% permanent rabbit_top_worker.
+-module(rabbit_top_sup).
+
+-behaviour(supervisor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([start_link/0]).
+-export([init/1]).
+
+%% Start the supervisor, registered locally as ?MODULE.
+start_link() ->
+     supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% One worker child; one_for_one, at most 10 restarts in 10 seconds.
+init([]) ->
+    Top = {rabbit_top_worker,
+           {rabbit_top_worker, start_link, []},
+           permanent, ?WORKER_WAIT, worker, [rabbit_top_worker]},
+    {ok, {{one_for_one, 10, 10}, [Top]}}.
+
diff --git a/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_util.erl b/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_util.erl
new file mode 100644 (file)
index 0000000..225c09f
--- /dev/null
@@ -0,0 +1,132 @@
+%%  The contents of this file are subject to the Mozilla Public License
+%%  Version 1.1 (the "License"); you may not use this file except in
+%%  compliance with the License. You may obtain a copy of the License
+%%  at http://www.mozilla.org/MPL/
+%%
+%%  Software distributed under the License is distributed on an "AS IS"
+%%  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%%  the License for the specific language governing rights and
+%%  limitations under the License.
+%%
+%%  The Original Code is RabbitMQ.
+%%
+%%  The Initial Developer of the Original Code is VMware, Inc.
+%%  Copyright (c) 2007-2012 VMware, Inc.  All rights reserved.
+%%
+
+%% Helpers shared by the rabbitmq-top web-machine resources: selecting
+%% the "top" processes by some statistic, formatting terms for JSON
+%% output, and deriving a human-readable name for a pid.
+-module(rabbit_top_util).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-export([toplist/3, fmt_all/1, fmt/1, obtain_name/1, safe_process_info/2]).
+
+%% Return the Count entries of List (proplists containing Key) with the
+%% largest Key values, largest first, each with a {name, ...} prepended.
+toplist(Key, Count, List) ->
+    Sorted = lists:sublist(
+               lists:reverse(
+                 lists:keysort(1, [toplist(Key, I) || I <- List])), Count),
+    [add_name(Info) || {_, Info} <- Sorted].
+
+%% Pair an info proplist with its value for Key, for keysort/2 above.
+toplist(Key, Info) ->
+    {Key, Val} = lists:keyfind(Key, 1, Info),
+    {Val, Info}.
+
+%% Prepend a display name derived from the entry's pid.
+add_name(Info) ->
+    {pid, Pid} = lists:keyfind(pid, 1, Info),
+    [{name, obtain_name(Pid)} | Info].
+
+%% Format every value of a proplist for the JSON encoder.
+fmt_all(Info) -> [{K, fmt(V)} || {K, V} <- Info].
+
+%% Format a single term as a binary.
+fmt(Pid) when is_pid(Pid) ->
+    list_to_binary(pid_to_list(Pid));
+fmt(Other) ->
+    list_to_binary(rabbit_misc:format("~p", [Other])).
+
+%% Try naming strategies in order until one returns something other
+%% than 'fail': registered name, process-dictionary name, initial call.
+obtain_name(Pid) ->
+    lists:foldl(fun(Fun,  fail) -> Fun(Pid);
+                   (_Fun, Res)  -> Res
+                end, fail, [fun obtain_from_registered_name/1,
+                            fun obtain_from_process_name/1,
+                            fun obtain_from_initial_call/1]).
+
+obtain_from_registered_name(Pid) ->
+    case safe_process_info(Pid, registered_name) of
+        {registered_name, Name} -> [{type, registered},
+                                    {name, Name}];
+        _                       -> fail
+    end.
+
+%% Use the 'process_name' entry RabbitMQ stores in the process
+%% dictionary of channels, queues and connections.
+obtain_from_process_name(Pid) ->
+    case safe_process_info(Pid, dictionary) of
+        {dictionary, Dict} ->
+            case lists:keyfind(process_name, 1, Dict) of
+                {process_name, Name} -> fmt_process_name(Name);
+                false                -> fail
+            end;
+        _ ->
+            fail
+    end.
+
+%% Turn a process_name term into the proplist consumed by the UI's
+%% fmt_process_name() in top.js (supertype channel/queue/connection).
+fmt_process_name({Type, {ConnName, ChNum}}) when is_binary(ConnName),
+                                                 is_integer(ChNum) ->
+    [{supertype,       channel},
+     {type,            Type},
+     {connection_name, ConnName},
+     {channel_number,  ChNum}];
+
+fmt_process_name({Type, #resource{virtual_host = VHost,
+                                  name         = Name}}) ->
+    [{supertype,  queue},
+     {type,       Type},
+     {queue_name, Name},
+     {vhost,      VHost}];
+
+fmt_process_name({Type, ConnName}) when is_binary(ConnName) ->
+    [{supertype,       connection},
+     {type,            Type},
+     {connection_name, ConnName}];
+
+fmt_process_name({Type, unknown}) -> %% probably some adapter thing
+    [{supertype,       connection},
+     {type,            Type},
+     {connection_name, unknown}].
+
+%% Last resort: classify the process by its (guessed) initial call.
+obtain_from_initial_call(Pid) ->
+    case initial_call(Pid) of
+        fail -> [{type, starting},
+                 {name, fmt(Pid)}];
+        MFA  -> case guess_initial_call(MFA) of
+                    fail -> [{type, unknown},
+                             {name, fmt(MFA)}];
+                    Name -> [{type, known},
+                             {name, Name}]
+                end
+    end.
+
+%% Prefer the '$initial_call' dictionary entry (set by proc_lib) over
+%% process_info(Pid, initial_call).
+initial_call(Pid) ->
+    case initial_call_dict(Pid) of
+        fail -> case safe_process_info(Pid, initial_call) of
+                    {initial_call, MFA} -> MFA;
+                    _                   -> fail
+                end;
+        MFA  -> MFA
+    end.
+
+initial_call_dict(Pid) ->
+    case safe_process_info(Pid, dictionary) of
+        {dictionary, Dict} ->
+            case lists:keyfind('$initial_call', 1, Dict) of
+                {'$initial_call', MFA} -> MFA;
+                false                  -> fail
+            end;
+        _ ->
+            fail
+    end.
+
+%% Map well-known initial calls to friendlier labels.
+guess_initial_call({supervisor, _F, _A})        -> supervisor;
+guess_initial_call({supervisor2, _F, _A})       -> supervisor;
+guess_initial_call({mochiweb_acceptor, _F, _A}) -> mochiweb_http;
+guess_initial_call(_MFA)                        -> fail.
+
+
+%% process_info/2 executed on the pid's home node via rpc.
+%% NOTE(review): may return {badrpc, _} when that node is unreachable;
+%% callers pattern-match on the expected shape and treat anything else
+%% as failure.
+safe_process_info(Pid, Info) ->
+    rpc:call(node(Pid), erlang, process_info, [Pid, Info]).
diff --git a/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl b/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_wm_ets_tables.erl
new file mode 100644 (file)
index 0000000..8b38a97
--- /dev/null
@@ -0,0 +1,64 @@
+%%  The contents of this file are subject to the Mozilla Public License
+%%  Version 1.1 (the "License"); you may not use this file except in
+%%  compliance with the License. You may obtain a copy of the License
+%%  at http://www.mozilla.org/MPL/
+%%
+%%  Software distributed under the License is distributed on an "AS IS"
+%%  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%%  the License for the specific language governing rights and
+%%  limitations under the License.
+%%
+%%  The Original Code is RabbitMQ.
+%%
+%%  The Initial Developer of the Original Code is VMware, Inc.
+%%  Copyright (c) 2007-2012 VMware, Inc.  All rights reserved.
+%%
+
+%% Webmachine resource behind GET /api/top/ets/<node>: the largest ETS
+%% tables on that node. Admin-only.
+-module(rabbit_top_wm_ets_tables).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+   {[{"application/json", to_json}], ReqData, Context}.
+
+%% Query string: sort (default memory), sort_reverse ("true" -> asc),
+%% row_count (default 20).
+%% NOTE(review): list_to_atom/1 on the sort parameter creates atoms from
+%% request input — admin-only endpoint, but worth confirming this is
+%% acceptable.
+to_json(ReqData, Context) ->
+    Sort = case wrq:get_qs_value("sort", ReqData) of
+               undefined -> memory;
+               Str       -> list_to_atom(Str)
+           end,
+    Node = b2a(rabbit_mgmt_util:id(node, ReqData)),
+    Order = case wrq:get_qs_value("sort_reverse", ReqData) of
+                "true" -> asc;
+                _      -> desc
+            end,
+    RowCount = case wrq:get_qs_value("row_count", ReqData) of
+                   undefined -> 20;
+                   List when is_list(List) -> list_to_integer(List)
+               end,
+    rabbit_mgmt_util:reply([{node,       Node},
+                            {row_count,  RowCount},
+                            {ets_tables, ets_tables(Node, Sort, Order, RowCount)}],
+                           ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Node names arrive as binaries; the worker API takes atoms.
+b2a(B) -> list_to_atom(binary_to_list(B)).
+
+ets_tables(Node, Sort, Order, RowCount) ->
+    [fmt(P) || P <- rabbit_top_worker:ets_tables(Node, Sort, Order, RowCount)].
+
+%% Replace the raw owner pid with its formatted (binary) form.
+fmt(Info) ->
+    {owner, Pid} = lists:keyfind(owner, 1, Info),
+    Info1 = lists:keydelete(owner, 1, Info),
+    [{owner,  rabbit_top_util:fmt(Pid)} | Info1].
diff --git a/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_wm_process.erl b/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_wm_process.erl
new file mode 100644 (file)
index 0000000..17a893d
--- /dev/null
@@ -0,0 +1,76 @@
+%%  The contents of this file are subject to the Mozilla Public License
+%%  Version 1.1 (the "License"); you may not use this file except in
+%%  compliance with the License. You may obtain a copy of the License
+%%  at http://www.mozilla.org/MPL/
+%%
+%%  Software distributed under the License is distributed on an "AS IS"
+%%  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%%  the License for the specific language governing rights and
+%%  limitations under the License.
+%%
+%%  The Original Code is RabbitMQ.
+%%
+%%  The Initial Developer of the Original Code is VMware, Inc.
+%%  Copyright (c) 2007-2012 VMware, Inc.  All rights reserved.
+%%
+
+%% Webmachine resource behind GET /api/process/<pid>: detailed info for
+%% a single Erlang process. Admin-only.
+-module(rabbit_top_wm_process).
+
+-export([init/1, to_json/2, resource_exists/2, content_types_provided/2,
+         is_authorized/2]).
+
+%% Extra process_info/2 items fetched live, on top of the worker's
+%% recorded base stats.
+-define(ADDITIONAL_INFO,
+        [current_stacktrace, trap_exit, links, monitors, monitored_by]).
+
+-include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+   {[{"application/json", to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+    rabbit_mgmt_util:reply(proc(ReqData), ReqData, Context).
+
+resource_exists(ReqData, Context) ->
+    {case proc(ReqData) of
+         not_found -> false;
+         _         -> true
+     end, ReqData, Context}.
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+%% Build the response proplist for the requested pid, or not_found when
+%% the pid string does not parse (error:badarg from list_to_pid/1) or
+%% the worker has no record of the process.
+proc(ReqData) ->
+    PidBin = rabbit_mgmt_util:id(pid, ReqData),
+    try list_to_pid(binary_to_list(PidBin)) of
+        Pid -> case rabbit_top_worker:proc(Pid) of
+                   {ok, Base} -> [{pid,  PidBin},
+                                  {name, rabbit_top_util:obtain_name(Pid)}] ++
+                                     Base ++
+                                     case rabbit_top_util:safe_process_info(
+                                            Pid, ?ADDITIONAL_INFO) of
+                                         undefined -> [];
+                                         Props     -> fmt(Props)
+                                     end;
+                   error      -> not_found
+               end
+    catch
+        error:badarg ->
+            not_found
+    end.
+
+
+%% Format the additional process_info items for JSON.
+fmt(Props) -> [{K, fmt(K, V)} || {K, V} <- Props].
+
+fmt(links,              V) -> [rabbit_top_util:fmt(P) || P <- V, is_pid(P)];
+fmt(monitors,           V) -> [rabbit_top_util:fmt(P) || {process, P} <- V];
+fmt(monitored_by,       V) -> [rabbit_top_util:fmt(P) || P <- V];
+fmt(current_stacktrace, V) -> rabbit_top_util:fmt(V);
+fmt(_K,                 V) -> V.
diff --git a/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_wm_processes.erl b/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_wm_processes.erl
new file mode 100644 (file)
index 0000000..4695c0a
--- /dev/null
@@ -0,0 +1,65 @@
+%%  The contents of this file are subject to the Mozilla Public License
+%%  Version 1.1 (the "License"); you may not use this file except in
+%%  compliance with the License. You may obtain a copy of the License
+%%  at http://www.mozilla.org/MPL/
+%%
+%%  Software distributed under the License is distributed on an "AS IS"
+%%  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%%  the License for the specific language governing rights and
+%%  limitations under the License.
+%%
+%%  The Original Code is RabbitMQ.
+%%
+%%  The Initial Developer of the Original Code is VMware, Inc.
+%%  Copyright (c) 2007-2012 VMware, Inc.  All rights reserved.
+%%
+
+-module(rabbit_top_wm_processes).
+
+-export([init/1, to_json/2, content_types_provided/2, is_authorized/2]).
+
+-include_lib("rabbitmq_management/include/rabbit_mgmt.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+-include_lib("webmachine/include/webmachine.hrl").
+
+%%--------------------------------------------------------------------
+
+init(_Config) -> {ok, #context{}}.
+
+content_types_provided(ReqData, Context) ->
+    {[{"application/json", to_json}], ReqData, Context}.
+
+to_json(ReqData, Context) ->
+    Sort = case wrq:get_qs_value("sort", ReqData) of
+               undefined -> reduction_delta;
+               Str       -> list_to_atom(Str)
+           end,
+    Node = b2a(rabbit_mgmt_util:id(node, ReqData)),
+    Order = case wrq:get_qs_value("sort_reverse", ReqData) of
+                "true" -> asc;
+                _      -> desc
+            end,
+    RowCount = case wrq:get_qs_value("row_count", ReqData) of
+                   undefined -> 20;
+                   List when is_list(List) -> list_to_integer(List)
+               end,
+    rabbit_mgmt_util:reply([{node,      Node},
+                            {row_count, RowCount},
+                            {processes, procs(Node, Sort, Order, RowCount)}],
+                           ReqData, Context).
+
+is_authorized(ReqData, Context) ->
+    rabbit_mgmt_util:is_authorized_admin(ReqData, Context).
+
+%%--------------------------------------------------------------------
+
+b2a(B) -> list_to_atom(binary_to_list(B)).
+
+procs(Node, Sort, Order, RowCount) ->
+    [fmt(P) || P <- rabbit_top_worker:procs(Node, Sort, Order, RowCount)].
+
+fmt(Info) ->
+    {pid, Pid} = lists:keyfind(pid, 1, Info),
+    Info1 = lists:keydelete(pid, 1, Info),
+    [{pid,  rabbit_top_util:fmt(Pid)},
+     {name, rabbit_top_util:obtain_name(Pid)} | Info1].
diff --git a/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_worker.erl b/rabbitmq-server/deps/rabbitmq_top/src/rabbit_top_worker.erl
new file mode 100644 (file)
index 0000000..d61a47c
--- /dev/null
@@ -0,0 +1,164 @@
+%%  The contents of this file are subject to the Mozilla Public License
+%%  Version 1.1 (the "License"); you may not use this file except in
+%%  compliance with the License. You may obtain a copy of the License
+%%  at http://www.mozilla.org/MPL/
+%%
+%%  Software distributed under the License is distributed on an "AS IS"
+%%  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%%  the License for the specific language governing rights and
+%%  limitations under the License.
+%%
+%%  The Original Code is RabbitMQ.
+%%
+%%  The Initial Developer of the Original Code is VMware, Inc.
+%%  Copyright (c) 2007-2011 VMware, Inc.  All rights reserved.
+%%
+
+-module(rabbit_top_worker).
+-behaviour(gen_server).
+
+-define(PROCESS_INFO, [memory, message_queue_len, reductions, status]).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+         terminate/2, code_change/3]).
+
+-export([procs/4, proc/1, ets_tables/4, ets_table/1]).
+
+-define(SERVER, ?MODULE).
+-define(MILLIS, 1000).
+-define(EVERY, 5).
+-define(SLEEP, ?EVERY * ?MILLIS).
+
+-record(state, {procs, ets_tables}).
+
+%%--------------------------------------------------------------------
+
+start_link() ->
+    gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+
+procs(Node, Key, Rev, Count) ->
+    gen_server:call({?SERVER, Node}, {procs, Key, Rev, Count}, infinity).
+
+proc(Pid) ->
+    gen_server:call({?SERVER, node(Pid)}, {proc, Pid}, infinity).
+
+ets_tables(Node, Key, Rev, Count) ->
+    gen_server:call({?SERVER, Node}, {ets_tables, Key, Rev, Count}, infinity).
+
+ets_table(Name) ->
+    table_info(Name).
+
+%%--------------------------------------------------------------------
+
+init([]) ->
+    ensure_timer(),
+    {ok, #state{procs = procs(dict:new()),
+                ets_tables = ets_tables([])}}.
+
+handle_call({ets_tables, Key, Order, Count}, _From,
+            State = #state{ets_tables = Tables}) ->
+    {reply, toplist(Key, Order, Count, Tables), State};
+
+handle_call({procs, Key, Order, Count}, _From, State = #state{procs = Procs}) ->
+    {reply, toplist(Key, Order, Count, flatten(Procs)), State};
+
+handle_call({proc, Pid}, _From, State = #state{procs = Procs}) ->
+    {reply, dict:find(Pid, Procs), State}.
+
+handle_cast(_Msg, State) ->
+    {noreply, State}.
+
+handle_info(update, State = #state{procs = OldProcs, ets_tables = OldTables}) ->
+    ensure_timer(),
+    {noreply, State#state{procs = procs(OldProcs),
+                          ets_tables = ets_tables(OldTables)}};
+
+handle_info(_Msg, State) ->
+    {noreply, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%--------------------------------------------------------------------
+
+ensure_timer() ->
+    erlang:send_after(?SLEEP, self(), update).
+
+procs(OldProcs) ->
+    lists:foldl(
+      fun(Pid, Procs) ->
+              case process_info(Pid, ?PROCESS_INFO) of
+                  undefined ->
+                      Procs;
+                  Props ->
+                      Delta = (reductions(Props) -
+                                   case dict:find(Pid, OldProcs) of
+                                       {ok, OldProps} -> reductions(OldProps);
+                                       error          -> 0
+                                   end) div ?EVERY,
+                      dict:store(
+                        Pid, [{reduction_delta, Delta} | Props], Procs)
+              end
+      end, dict:new(), processes()).
+
+reductions(Props) ->
+    {reductions, R} = lists:keyfind(reductions, 1, Props),
+    R.
+
+ets_tables(_OldTables) ->
+    lists:filtermap(
+        fun(Table) ->
+            case table_info(Table) of
+                undefined -> false;
+                Info      -> {true, Info}
+            end
+        end,
+        ets:all()).
+
+table_info(Table) when not is_atom(Table) -> undefined;
+table_info(TableName) when is_atom(TableName) ->
+    Info = lists:map(fun
+                        ({memory, MemWords}) -> {memory, bytes(MemWords)};
+                        (Other) -> Other
+                     end,
+                     ets:info(TableName)),
+    {owner, OwnerPid} = lists:keyfind(owner, 1, Info),
+    case process_info(OwnerPid, registered_name) of
+        Res when Res =:= []; Res =:= undefined -> Info;
+        {registered_name, OwnerName} -> [{owner_name, OwnerName} | Info]
+    end.
+
+flatten(Procs) ->
+    dict:fold(fun(Name, Props, Rest) ->
+                      [[{pid, Name} | Props] | Rest]
+              end, [], Procs).
+
+%%--------------------------------------------------------------------
+
+toplist(Key, Order, Count, List) ->
+    RevFun = case Order of
+                 asc  -> fun (L) -> L end;
+                 desc -> fun lists:reverse/1
+             end,
+    Keyed = [toplist(Key, I) || I <- List],
+    Sorted = lists:sublist(RevFun(lists:keysort(1, Keyed)), Count),
+    [Info || {_, Info} <- Sorted].
+
+toplist(Key, Info) ->
+    % Do not crash if unknown sort key. Keep unsorted instead.
+    case lists:keyfind(Key, 1, Info) of
+        {Key, Val} -> {Val, Info};
+        false      -> {undefined, Info}
+    end.
+
+bytes(Words) ->  try
+                     Words * erlang:system_info(wordsize)
+                 catch
+                     _:_ -> 0
+                 end.
\ No newline at end of file
diff --git a/rabbitmq-server/deps/rabbitmq_top/src/rabbitmq_top.app.src b/rabbitmq-server/deps/rabbitmq_top/src/rabbitmq_top.app.src
new file mode 100644 (file)
index 0000000..38c7c3c
--- /dev/null
@@ -0,0 +1,7 @@
+{application, rabbitmq_top,
+ [{description, "RabbitMQ Top"},
+  {vsn, "3.6.5"},
+  {modules, []},
+  {registered, []},
+  {mod, {rabbit_top_app, []}},
+  {applications, [kernel, stdlib, rabbit, rabbitmq_management]}]}.
diff --git a/rabbitmq-server/deps/rabbitmq_tracing/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_tracing/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index 436a2a4346c0a9ffd3299f4c42def0866032e9af..d0ec067b58b92ac26ee7902c5310c0d89da68beb 100644 (file)
@@ -1,7 +1,7 @@
 PROJECT = rabbitmq_tracing
 
 DEPS = rabbitmq_management webmachine
-TEST_DEPS = rabbit
+TEST_DEPS += rabbit
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -13,9 +13,3 @@ ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
 include erlang.mk
-
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
-
-WITH_BROKER_TEST_COMMANDS := eunit:test(rabbit_tracing_test,[verbose])
index 89bfe77e1fc258ca28c6dc52a29eb660e98ba751..16d1c445caf10dfd856b27d7fff1508872171d47 100644 (file)
@@ -1,7 +1,7 @@
 # RabbitMQ (Message) Tracing Plugin
 
 This is an opinionated tracing plugin that extends RabbitMQ management UI.
-It logs messages passing through vhosts with enabled tracing to a log
+It logs messages passing through vhosts [with enabled tracing](http://www.rabbitmq.com/firehose.html) to a log
 file.
 
 ## Usage
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 34e2efa553876bceadd871156f0d625ddb0766c7..c467125cebf9f0bf1821e93283c79e7fd7ba90d5 100644 (file)
@@ -30,5 +30,5 @@ start_link(Args) -> supervisor2:start_link(?MODULE, Args).
 init(Args) ->
     {ok, {{one_for_one, 3, 10},
           [{consumer, {rabbit_tracing_consumer, start_link, [Args]},
-            transient, ?MAX_WAIT, worker,
+            transient, ?WORKER_WAIT, worker,
             [rabbit_tracing_consumer]}]}}.
index b1e09c760e4ddcd9d22079dcbaf3be33cf404424..7cccd6677be506ef6c6b98d3ed95b7b4b33e4f4b 100644 (file)
@@ -34,7 +34,7 @@ start_child(Id, Args) ->
     supervisor:start_child(
       ?SUPERVISOR,
       {Id, {rabbit_tracing_consumer_sup, start_link, [Args]},
-       temporary, ?MAX_WAIT, supervisor,
+       temporary, ?SUPERVISOR_WAIT, supervisor,
        [rabbit_tracing_consumer_sup]}).
 
 stop_child(Id) ->
@@ -46,5 +46,5 @@ stop_child(Id) ->
 
 init([]) -> {ok, {{one_for_one, 3, 10},
                   [{traces, {rabbit_tracing_traces, start_link, []},
-                    transient, ?MAX_WAIT, worker,
+                    transient, ?WORKER_WAIT, worker,
                     [rabbit_tracing_traces]}]}}.
index 608be4f46c6482f3f732cefe622c4eccc62e031e..46da6be0301b1db78349017ca861b3523db32c97 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_tracing,
  [{description, "RabbitMQ message logging / tracing"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_tracing_app, []}},
similarity index 51%
rename from rabbitmq-server/deps/rabbitmq_tracing/test/src/rabbit_tracing_test.erl
rename to rabbitmq-server/deps/rabbitmq_tracing/test/rabbit_tracing_SUITE.erl
index 00300c867e3be16766945e515ba1c42845e73cc5..675d87ade9b56d81c58ed735ded1cd7ce89d014e 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_tracing_test).
+-module(rabbit_tracing_SUITE).
+
+-compile(export_all).
 
 -define(LOG_DIR, "/var/tmp/rabbitmq-tracing/").
 
+-include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("amqp_client/include/amqp_client.hrl").
 -include_lib("rabbitmq_management/include/rabbit_mgmt_test.hrl").
 
 -import(rabbit_misc, [pget/2]).
 
-tracing_test() ->
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                tracing_test,
+                                tracing_validation_test
+                               ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    %% initializes httpc
+    inets:start(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+
+tracing_test(Config) ->
     case filelib:is_dir(?LOG_DIR) of
         true -> {ok, Files} = file:list_dir(?LOG_DIR),
                 [ok = file:delete(?LOG_DIR ++ F) || F <- Files];
         _    -> ok
     end,
 
-    [] = http_get("/traces/%2f/"),
-    [] = http_get("/trace-files/"),
+    [] = http_get(Config, "/traces/%2f/"),
+    [] = http_get(Config, "/trace-files/"),
 
     Args = [{format,  <<"json">>},
             {pattern, <<"#">>}],
-    http_put("/traces/%2f/test", Args, ?NO_CONTENT),
+    http_put(Config, "/traces/%2f/test", Args, ?NO_CONTENT),
     assert_list([[{name,    <<"test">>},
                   {format,  <<"json">>},
-                  {pattern, <<"#">>}]], http_get("/traces/%2f/")),
+                  {pattern, <<"#">>}]], http_get(Config, "/traces/%2f/")),
     assert_item([{name,    <<"test">>},
                  {format,  <<"json">>},
-                 {pattern, <<"#">>}], http_get("/traces/%2f/test")),
+                 {pattern, <<"#">>}], http_get(Config, "/traces/%2f/test")),
 
-    {ok, Conn} = amqp_connection:start(#amqp_params_network{}),
-    {ok, Ch} = amqp_connection:open_channel(Conn),
+    Ch = rabbit_ct_client_helpers:open_channel(Config),
     amqp_channel:cast(Ch, #'basic.publish'{ exchange    = <<"amq.topic">>,
                                             routing_key = <<"key">> },
                       #amqp_msg{props   = #'P_basic'{},
                                 payload = <<"Hello world">>}),
 
-    amqp_channel:close(Ch),
-    amqp_connection:close(Conn),
+    rabbit_ct_client_helpers:close_channel(Ch),
 
     timer:sleep(100),
 
-    http_delete("/traces/%2f/test", ?NO_CONTENT),
-    [] = http_get("/traces/%2f/"),
-    assert_list([[{name, <<"test.log">>}]], http_get("/trace-files/")),
+    http_delete(Config, "/traces/%2f/test", ?NO_CONTENT),
+    [] = http_get(Config, "/traces/%2f/"),
+    assert_list([[{name, <<"test.log">>}]], http_get(Config, "/trace-files/")),
     %% This is a bit cheeky as the log is actually one JSON doc per
     %% line and we assume here it's only one line
     assert_item([{type,         <<"published">>},
                  {exchange,     <<"amq.topic">>},
                  {routing_keys, [<<"key">>]},
                  {payload,      base64:encode(<<"Hello world">>)}],
-                http_get("/trace-files/test.log")),
-    http_delete("/trace-files/test.log", ?NO_CONTENT),
-    ok.
+                http_get(Config, "/trace-files/test.log")),
+    http_delete(Config, "/trace-files/test.log", ?NO_CONTENT),
+
+    passed.
 
-tracing_validation_test() ->
+tracing_validation_test(Config) ->
     Path = "/traces/%2f/test",
-    http_put(Path, [{pattern,           <<"#">>}],    ?BAD_REQUEST),
-    http_put(Path, [{format,            <<"json">>}], ?BAD_REQUEST),
-    http_put(Path, [{format,            <<"ebcdic">>},
+    http_put(Config, Path, [{pattern,           <<"#">>}],    ?BAD_REQUEST),
+    http_put(Config, Path, [{format,            <<"json">>}], ?BAD_REQUEST),
+    http_put(Config, Path, [{format,            <<"ebcdic">>},
                     {pattern,           <<"#">>}],    ?BAD_REQUEST),
-    http_put(Path, [{format,            <<"text">>},
+    http_put(Config, Path, [{format,            <<"text">>},
                     {pattern,           <<"#">>},
                     {max_payload_bytes, <<"abc">>}],  ?BAD_REQUEST),
-    http_put(Path, [{format,            <<"json">>},
+    http_put(Config, Path, [{format,            <<"json">>},
                     {pattern,           <<"#">>},
                     {max_payload_bytes, 1000}],       ?NO_CONTENT),
-    http_delete(Path, ?NO_CONTENT),
-    ok.
+    http_delete(Config, Path, ?NO_CONTENT),
+
+    passed.
 
 %%---------------------------------------------------------------------------
 %% TODO: Below is copied from rabbit_mgmt_test_http,
-%%       should be moved into a shared library
+%%       should be moved to use rabbit_mgmt_test_util once rabbitmq_management
+%%       is moved to Common Test
 
-http_get(Path) ->
-    http_get(Path, ?OK).
+http_get(Config, Path) ->
+    http_get(Config, Path, ?OK).
 
-http_get(Path, CodeExp) ->
-    http_get(Path, "guest", "guest", CodeExp).
+http_get(Config, Path, CodeExp) ->
+    http_get(Config, Path, "guest", "guest", CodeExp).
 
-http_get(Path, User, Pass, CodeExp) ->
+http_get(Config, Path, User, Pass, CodeExp) ->
     {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
-        req(get, Path, [auth_header(User, Pass)]),
+        req(Config, get, Path, [auth_header(User, Pass)]),
     assert_code(CodeExp, CodeAct, "GET", Path, ResBody),
     decode(CodeExp, Headers, ResBody).
 
-http_put(Path, List, CodeExp) ->
-    http_put_raw(Path, format_for_upload(List), CodeExp).
+http_put(Config, Path, List, CodeExp) ->
+    http_put_raw(Config, Path, format_for_upload(List), CodeExp).
 
 format_for_upload(List) ->
     iolist_to_binary(mochijson2:encode({struct, List})).
 
-http_put_raw(Path, Body, CodeExp) ->
-    http_upload_raw(put, Path, Body, "guest", "guest", CodeExp).
+http_put_raw(Config, Path, Body, CodeExp) ->
+    http_upload_raw(Config, put, Path, Body, "guest", "guest", CodeExp).
 
-http_upload_raw(Type, Path, Body, User, Pass, CodeExp) ->
+http_upload_raw(Config, Type, Path, Body, User, Pass, CodeExp) ->
     {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
-        req(Type, Path, [auth_header(User, Pass)], Body),
+        req(Config, Type, Path, [auth_header(User, Pass)], Body),
     assert_code(CodeExp, CodeAct, Type, Path, ResBody),
     decode(CodeExp, Headers, ResBody).
 
-http_delete(Path, CodeExp) ->
-    http_delete(Path, "guest", "guest", CodeExp).
+http_delete(Config, Path, CodeExp) ->
+    http_delete(Config, Path, "guest", "guest", CodeExp).
 
-http_delete(Path, User, Pass, CodeExp) ->
+http_delete(Config, Path, User, Pass, CodeExp) ->
     {ok, {{_HTTP, CodeAct, _}, Headers, ResBody}} =
-        req(delete, Path, [auth_header(User, Pass)]),
+        req(Config, delete, Path, [auth_header(User, Pass)]),
     assert_code(CodeExp, CodeAct, "DELETE", Path, ResBody),
     decode(CodeExp, Headers, ResBody).
 
@@ -131,11 +185,23 @@ assert_code(CodeExp, CodeAct, Type, Path, Body) ->
                           path, Path, body, Body})
     end.
 
-req(Type, Path, Headers) ->
-    httpc:request(Type, {?PREFIX ++ Path, Headers}, ?HTTPC_OPTS, []).
+mgmt_port(Config) ->
+    config_port(Config, tcp_port_mgmt).
+
+config_port(Config, PortKey) ->
+    rabbit_ct_broker_helpers:get_node_config(Config, 0, PortKey).
+
+uri_base_from(Config) ->
+    binary_to_list(
+      rabbit_mgmt_format:print(
+        "http://localhost:~w/api",
+        [mgmt_port(Config)])).
+
+req(Config, Type, Path, Headers) ->
+    httpc:request(Type, {uri_base_from(Config) ++ Path, Headers}, ?HTTPC_OPTS, []).
 
-req(Type, Path, Headers, Body) ->
-    httpc:request(Type, {?PREFIX ++ Path, Headers, "application/json", Body},
+req(Config, Type, Path, Headers, Body) ->
+    httpc:request(Type, {uri_base_from(Config) ++ Path, Headers, "application/json", Body},
                   ?HTTPC_OPTS, []).
 
 decode(?OK, _Headers,  ResBody) -> cleanup(mochijson2:decode(ResBody));
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_trust_store/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/CONTRIBUTING.md b/rabbitmq-server/deps/rabbitmq_trust_store/CONTRIBUTING.md
new file mode 100644 (file)
index 0000000..45bbcbe
--- /dev/null
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/Makefile b/rabbitmq-server/deps/rabbitmq_trust_store/Makefile
new file mode 100644 (file)
index 0000000..a424567
--- /dev/null
@@ -0,0 +1,26 @@
+PROJECT = rabbitmq_trust_store
+
+## We need Cowboy's test utilities
+TEST_DEPS = rabbit amqp_client ct_helper
+dep_ct_helper = git https://github.com/extend/ct_helper.git master
+
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+
+
+
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
+include erlang.mk
+
+
+# --------------------------------------------------------------------
+# Testing.
+# --------------------------------------------------------------------
+
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/README.md b/rabbitmq-server/deps/rabbitmq_trust_store/README.md
new file mode 100644 (file)
index 0000000..0934a54
--- /dev/null
@@ -0,0 +1,113 @@
+# RabbitMQ Certificate Trust Store
+
+This plugin provides support for TLS (x509) certificate whitelisting.
+All plugins which use the global TLS options will be configured with
+the same whitelist.
+
+## Rationale
+
+RabbitMQ can be configured to accept self-signed certificates
+through various TLS socket options, namely the `ca_certs` and
+`partial_chain` properties. However, this configuration is largely static.
+There is no convenient means with which to change it in realtime, that
+is, without making configuration changes to TLS listening sockets.
+
+This plugin maintains a list of trusted .PEM formatted TLS (x509) certificates in a given
+directory, refreshing at configurable intervals, or when `rabbitmqctl
+eval 'rabbit_trust_store:refresh().'` is invoked. Said certificates are then used
+to verify inbound TLS connections for the entire RabbitMQ node (all plugins and protocols).
+The list is node-local.
+
+## RabbitMQ Version Requirements
+
+This plugin requires RabbitMQ `3.6.1` or later.
+
+## Installation and Binary Builds
+
+This plugin is now available from the [RabbitMQ community plugins page](http://www.rabbitmq.com/community-plugins.html).
+Please consult the docs on [how to install RabbitMQ plugins](http://www.rabbitmq.com/plugins.html#installing-plugins).
+
+## Usage
+
+Configure the trust store with a directory of whitelisted certificates
+and a refresh interval:
+
+```
+    {rabbitmq_trust_store,
+     [{directory,        "$HOME/rabbit/whitelist"}, %% trusted certificate directory path
+      {refresh_interval, {seconds, 30}}             %% refresh interval in seconds (only)
+    ]}
+```
+
+Setting `refresh_interval` to `0` seconds will disable automatic refresh.
+
+Certificates are distinguished by their **filenames** and file modification time.
+
+### Installing a Certificate
+
+Write a `PEM` formatted certificate file to the configured directory
+to whitelist it. This contains all the necessary information to
+authorize a client which presents the very same certificate to the
+server.
+
+### Removing a Certificate
+
+Delete the certificate file from the configured directory to remove it
+from the whitelist.
+
+> Note: TLS session caching bypasses the trust store certificate validation and can
+make it seem as if a removed certificate is still active. Disabling session caching
+in the broker by setting the `reuse_sessions` ssl option to `false` can be done if
+timely certificate removal is important.
+
+
+### Listing certificates
+
+To list the currently loaded certificates use the `rabbitmqctl` utility as follows:
+
+```
+    rabbitmqctl eval 'io:format(rabbit_trust_store:list()).'
+```
+
+This will output a formatted list of certificates similar to:
+
+```
+    Name: cert.pem
+    Serial: 1 | 0x1
+    Subject: O=client,CN=snowman.local
+    Issuer: L=87613,CN=MyTestRootCA
+    Validity: "2016-05-24T15:28:25Z - 2026-05-22T15:28:25Z"
+```
+
+Note that this command reads each certificate from disk in order to extract
+all the relevant information. If there are a large number of certificates in the
+trust store use this command sparingly.
+
+
+## How it Works
+
+When the trust-store starts, it configures TLS listening sockets and
+whitelists the certificates in the given directory; accepting
+sockets can then query the trust-store with their client's certificate. It
+refreshes the whitelist to correspond with changes in the directory's
+contents, installing and removing certificate details, after a refresh
+interval or a manual refresh (by invoking a `rabbitmqctl eval
+'rabbit_trust_store:refresh().'` from the commandline).
+
+
+## Building from Source
+
+See [Plugin Development guide](http://www.rabbitmq.com/plugin-development.html).
+
+TL;DR: running
+
+    make dist
+
+will build the plugin and put build artifacts under the `./plugins` directory.
+
+
+## Copyright and License
+
+(c) Pivotal Software Inc, 2007-2016
+
+Released under the MPL, the same license as RabbitMQ.
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/erlang.mk b/rabbitmq-server/deps/rabbitmq_trust_store/erlang.mk
new file mode 100644 (file)
index 0000000..9f0c0c3
--- /dev/null
@@ -0,0 +1,6589 @@
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app deps search rel docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+
+ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+gen_verbose_0 = @echo " GEN   " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A0 -noinput -boot start_clean
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+       $(verbose) :
+
+check:: clean app tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+       $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+distclean-tmp:
+       $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+       $(verbose) printf "%s\n" \
+               "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+               "Copyright (c) 2013-2015 Loïc Hoguin <essen@ninenines.eu>" \
+               "" \
+               "Usage: [V=1] $(MAKE) [target]..." \
+               "" \
+               "Core targets:" \
+               "  all           Run deps, app and rel targets in that order" \
+               "  app           Compile the project" \
+               "  deps          Fetch dependencies (if needed) and compile them" \
+               "  fetch-deps    Fetch dependencies (if needed) without compiling them" \
+               "  list-deps     Fetch dependencies (if needed) and list them" \
+               "  search q=...  Search for a package in the built-in index" \
+               "  rel           Build a release for this project, if applicable" \
+               "  docs          Build the documentation for this project" \
+               "  install-docs  Install the man pages for this project" \
+               "  check         Compile and run all tests and analysis for this project" \
+               "  tests         Run the tests for this project" \
+               "  clean         Delete temporary and output files from most targets" \
+               "  distclean     Delete all temporary and output files" \
+               "  help          Display this help and exit" \
+               "  erlang-mk     Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty)        $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $(2) -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(subst ",\",$(1)))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(subst \,\\\\,$(shell cygpath -w $1))
+else
+core_native_path = $1
+endif
+
+ifeq ($(shell which wget 2>/dev/null | wc -l), 1)
+define core_http_get
+       wget --no-check-certificate -O $(1) $(2)|| rm $(1)
+endef
+else
+define core_http_get.erl
+       ssl:start(),
+       inets:start(),
+       case httpc:request(get, {"$(2)", []}, [{autoredirect, true}], []) of
+               {ok, {{_, 200, _}, _, Body}} ->
+                       case file:write_file("$(1)", Body) of
+                               ok -> ok;
+                               {error, R1} -> halt(R1)
+                       end;
+               {error, R2} ->
+                       halt(R2)
+       end,
+       halt(0).
+endef
+
+define core_http_get
+       $(call erlang,$(call core_http_get.erl,$(call core_native_path,$1),$2))
+endef
+endif
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) -type f -name $(subst *,\*,$2)))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk:
+       git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ifdef ERLANG_MK_COMMIT
+       cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+endif
+       if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+       $(MAKE) -C $(ERLANG_MK_BUILD_DIR)
+       cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+       rm -rf $(ERLANG_MK_BUILD_DIR)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = 1.0.4
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/riverrun/branglecrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/riverrun/branglecrypt
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another a key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = master
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = v0.1.2
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY.  It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += classifier
+pkg_classifier_name = classifier
+pkg_classifier_description = An Erlang Bayesian Filter and Text Classifier
+pkg_classifier_homepage = https://github.com/inaka/classifier
+pkg_classifier_fetch = git
+pkg_classifier_repo = https://github.com/inaka/classifier
+pkg_classifier_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.1
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.1
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dhtcrawler
+pkg_dhtcrawler_name = dhtcrawler
+pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
+pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
+pkg_dhtcrawler_fetch = git
+pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
+pkg_dhtcrawler_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dtl
+pkg_dtl_name = dtl
+pkg_dtl_description = Django Template Language: A full-featured port of the Django template engine to Erlang.
+pkg_dtl_homepage = https://github.com/oinksoft/dtl
+pkg_dtl_fetch = git
+pkg_dtl_repo = https://github.com/oinksoft/dtl
+pkg_dtl_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += eganglia
+pkg_eganglia_name = eganglia
+pkg_eganglia_description = Erlang library to interact with Ganglia
+pkg_eganglia_homepage = https://github.com/inaka/eganglia
+pkg_eganglia_fetch = git
+pkg_eganglia_repo = https://github.com/inaka/eganglia
+pkg_eganglia_commit = v0.9.1
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = 2.0.4
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/knutin/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/knutin/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = 0.2.4
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is Pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = 0.1.1
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = exec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = 1.2
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = v1.4.6
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - a Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards  and  for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gossiperl
+pkg_gossiperl_name = gossiperl
+pkg_gossiperl_description = Gossip middleware in Erlang
+pkg_gossiperl_homepage = http://gossiperl.com/
+pkg_gossiperl_fetch = git
+pkg_gossiperl_repo = https://github.com/gossiperl/gossiperl
+pkg_gossiperl_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = v4.1.1
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = 0.6.0
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/klarna/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/klarna/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = 0.3.3
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = 0.3
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficient decode and encode JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/basho/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/basho/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/basho/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/basho/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = 0.1.0
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = Erlang MySQL Driver (from code.google.com)
+pkg_mysql_homepage = https://github.com/dizzyd/erlang-mysql-driver
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/dizzyd/erlang-mysql-driver
+pkg_mysql_commit = master
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += oauth2c
+pkg_oauth2c_name = oauth2c
+pkg_oauth2c_description = Erlang OAuth2 Client
+pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
+pkg_oauth2c_fetch = git
+pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
+pkg_oauth2c_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = 1.0.0
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = 0.3
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = 0.4.0
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.1.0
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = 2.2.1
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = 0.1.0
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+# Fixed garbled whitespace in the description ("Er    lang" -> "Erlang"),
+# a tab/paste artifact carried over from the upstream package index.
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+# Fixed garbled whitespace in the description ("stdinout_pool    :"),
+# a tab/paste artifact carried over from the upstream package index.
+pkg_stdinout_pool_description = stdinout_pool: stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global process registry for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an id generator for message service.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://github.com/krestenkrab/triq
+pkg_triq_fetch = git
+pkg_triq_repo = https://github.com/krestenkrab/triq
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = 0.3.0
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = v1.4.0
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = 1.0.3
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = 0.2.0
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = zab propotocol implement by erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+# Was empty (trailing blanks only); an empty ref makes the git fetch/checkout
+# step fail. Default to 'master' like every other entry in this registry.
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang. Read more on zippers
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+# pkg_print(1=pkg): print the registry metadata of package $(1) as one
+# labelled line per field. The "Pkg name" line is shown only when the
+# registry key differs from the app name (core_eq check below).
+# NOTE(review): recipe lines kept byte-identical -- they are tab-sensitive.
+define pkg_print
+       $(verbose) printf "%s\n" \
+               $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name:    $(1)") \
+               "App name:    $(pkg_$(1)_name)" \
+               "Description: $(pkg_$(1)_description)" \
+               "Home page:   $(pkg_$(1)_homepage)" \
+               "Fetch with:  $(pkg_$(1)_fetch)" \
+               "Repository:  $(pkg_$(1)_repo)" \
+               "Commit:      $(pkg_$(1)_commit)" \
+               ""
+
+endef
+
+# 'make search q=<term>': case-insensitively match <term> against each
+# package's name and description and print the matches; without q, dump
+# the entire registry.
+search:
+ifdef q
+       $(foreach p,$(PACKAGES), \
+               $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+                       $(call pkg_print,$(p))))
+else
+       $(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps
+
+# Configuration.
+
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+# dep_name(1=dep): resolve a dependency key to its app name -- an explicit
+# dep_<name> override wins, then the pkg_ registry, then the key itself.
+dep_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+# dep_repo(1=dep): pick the repo URL (override or registry) and rewrite
+# git:// GitHub URLs to https:// so fetching works behind firewalls/proxies.
+dep_repo = $(patsubst git://github.com/%,https://github.com/%, \
+       $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo)))
+# dep_commit(1=dep): ref to check out -- dep_<name>_commit override, then the
+# third word of dep_<name>, then the registry default.
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+
+# Absolute directories of all in-repo apps and of all (non-ignored) deps.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+       ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+       ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+dep_verbose_0 = @echo " DEP   " $(1);
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Core targets.
+
+# 'deps' target: no-op when SKIP_DEPS is set. Otherwise: build each in-repo
+# app (unless already inside one, IS_APP), reset deps.log at the top level
+# (IS_DEP!=1), then build every dependency exactly once -- deps.log records
+# which deps have been visited so diamond dependencies aren't rebuilt.
+# NOTE(review): recipe kept byte-identical; shell logic is order-sensitive.
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS)
+ifndef IS_APP
+       $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+               $(MAKE) -C $$dep IS_APP=1 || exit $$?; \
+       done
+endif
+ifneq ($(IS_DEP),1)
+       $(verbose) rm -f $(ERLANG_MK_TMP)/deps.log
+endif
+       $(verbose) mkdir -p $(ERLANG_MK_TMP)
+       $(verbose) for dep in $(ALL_DEPS_DIRS) ; do \
+               if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+                       :; \
+               else \
+                       echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+                       if [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+                               $(MAKE) -C $$dep IS_DEP=1 || exit $$?; \
+                       else \
+                               echo "Error: No Makefile to build dependency $$dep."; \
+                               exit 2; \
+                       fi \
+               fi \
+       done
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While Makefile file could be GNUmakefile or makefile,
+# in practice only Makefile is needed so far.
+# dep_autopatch(1=dep): choose a patching strategy for dependency $(1).
+# Grep-based heuristics: a Makefile including ../*.mk or mentioning rebar
+# (directly or via other *.mk files) gets the full rebar autopatch
+# (dep_autopatch2); a plain erlang.mk project only gets its .app.src fixed
+# and its erlang.mk replaced; no Makefile at all -> autopatch2 if sources
+# exist, else a noop Makefile.
+define dep_autopatch
+       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+               if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+                       $(call dep_autopatch2,$(1)); \
+               elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
+                       $(call dep_autopatch2,$(1)); \
+               elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
+                       $(call dep_autopatch2,$(1)); \
+               else \
+                       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+                               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+                               $(call dep_autopatch_erlang_mk,$(1)); \
+                       else \
+                               $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
+                       fi \
+               fi \
+       else \
+               if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+                       $(call dep_autopatch_noop,$(1)); \
+               else \
+                       $(call dep_autopatch2,$(1)); \
+               fi \
+       fi
+endef
+
+# dep_autopatch2(1=dep): fix up the dep's .app.src, then either translate its
+# rebar config to a Makefile (fetching a pinned rebar first) or, when no
+# rebar.config(.script) exists, generate a minimal erlang.mk Makefile.
+define dep_autopatch2
+       $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+       if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+               $(call dep_autopatch_fetch_rebar); \
+               $(call dep_autopatch_rebar,$(1)); \
+       else \
+               $(call dep_autopatch_gen,$(1)); \
+       fi
+endef
+
+# dep_autopatch_noop(1=dep): give a source-less dep a Makefile whose only
+# target does nothing, so the recursive 'deps' build can still enter it.
+define dep_autopatch_noop
+       printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Overwrite erlang.mk with the current file by default.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+# Replace the dep's bundled erlang.mk with an include of this (current) one.
+define dep_autopatch_erlang_mk
+       echo "include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(DEPS_DIR)/app)/erlang.mk" \
+               > $(DEPS_DIR)/$1/erlang.mk
+endef
+else
+# NO_AUTOPATCH_ERLANG_MK is set: leave the dep's own erlang.mk untouched.
+define dep_autopatch_erlang_mk
+       :
+endef
+endif
+
+# dep_autopatch_gen(1=dep): write a minimal Makefile that builds the dep with
+# debug_info via the top-level erlang.mk (two levels up from deps/<dep>/).
+define dep_autopatch_gen
+       printf "%s\n" \
+               "ERLC_OPTS = +debug_info" \
+               "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Fetch and build a pinned rebar checkout into ERLANG_MK_TMP (once); the
+# rebar modules are later loaded by dep_autopatch_rebar.erl to evaluate
+# rebar configuration. The commit hash pins a known-compatible rebar.
+define dep_autopatch_fetch_rebar
+	mkdir -p $(ERLANG_MK_TMP); \
+	if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+		git clone -q -n -- https://github.com/rebar/rebar $(ERLANG_MK_TMP)/rebar; \
+		cd $(ERLANG_MK_TMP)/rebar; \
+		git checkout -q 791db716b5a3a7671e0b351f95ddf24b848ee173; \
+		$(MAKE); \
+		cd -; \
+	fi
+endef
+
+# Convert a rebar project: keep the original Makefile as Makefile.orig.mk
+# (referenced by hook patching in dep_autopatch_rebar.erl), run the Erlang
+# converter, and drop any stale prebuilt .app file.
+define dep_autopatch_rebar
+	if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+		mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+	fi; \
+	$(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+	rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+# Erlang script (expanded via $(call erlang,...)) that translates a rebar
+# project into an erlang.mk-compatible Makefile for dependency $(1). It:
+#   - consults rebar.config and evaluates rebar.config.script (capturing any
+#     environment variables the script sets);
+#   - emits ERLC_OPTS, DEPS/dep_* lines, COMPILE_FIRST (including a
+#     parse_transform/include scan of the sources), pre_hooks, C port specs
+#     (c_src/Makefile.erlang.mk) and finally runs any rebar plugins.
+# NOTE(review): the $$ / \$ escaping and statement order are load-bearing —
+# this text is expanded by make before being handed to erl. Do not restyle
+# without exercising the generated Makefiles.
+define dep_autopatch_rebar.erl
+	application:load(rebar),
+	application:set_env(rebar, log_level, debug),
+	Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+		{ok, Conf0} -> Conf0;
+		_ -> []
+	end,
+	{Conf, OsEnv} = fun() ->
+		case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+			false -> {Conf1, []};
+			true ->
+				Bindings0 = erl_eval:new_bindings(),
+				Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+				Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+				Before = os:getenv(),
+				{ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+				{Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+		end
+	end(),
+	Write = fun (Text) ->
+		file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+	end,
+	Escape = fun (Text) ->
+		re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+	end,
+	Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+		"rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+	Write("C_SRC_DIR = /path/do/not/exist\n"),
+	Write("C_SRC_TYPE = rebar\n"),
+	Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+	Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+	fun() ->
+		Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+		case lists:keyfind(erl_opts, 1, Conf) of
+			false -> ok;
+			{_, ErlOpts} ->
+				lists:foreach(fun
+					({d, D}) ->
+						Write("ERLC_OPTS += -D" ++ atom_to_list(D) ++ "=1\n");
+					({i, I}) ->
+						Write(["ERLC_OPTS += -I ", I, "\n"]);
+					({platform_define, Regex, D}) ->
+						case rebar_utils:is_arch(Regex) of
+							true -> Write("ERLC_OPTS += -D" ++ atom_to_list(D) ++ "=1\n");
+							false -> ok
+						end;
+					({parse_transform, PT}) ->
+						Write("ERLC_OPTS += +'{parse_transform, " ++ atom_to_list(PT) ++ "}'\n");
+					(_) -> ok
+				end, ErlOpts)
+		end,
+		Write("\n")
+	end(),
+	fun() ->
+		File = case lists:keyfind(deps, 1, Conf) of
+			false -> [];
+			{_, Deps} ->
+				[begin case case Dep of
+							{N, S} when is_atom(N), is_list(S) -> {N, {hex, S}};
+							{N, S} when is_tuple(S) -> {N, S};
+							{N, _, S} -> {N, S};
+							{N, _, S, _} -> {N, S};
+							_ -> false
+						end of
+					false -> ok;
+					{Name, Source} ->
+						{Method, Repo, Commit} = case Source of
+							{hex, V} -> {hex, V, undefined};
+							{git, R} -> {git, R, master};
+							{M, R, {branch, C}} -> {M, R, C};
+							{M, R, {ref, C}} -> {M, R, C};
+							{M, R, {tag, C}} -> {M, R, C};
+							{M, R, C} -> {M, R, C}
+						end,
+						Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+				end end || Dep <- Deps]
+		end
+	end(),
+	fun() ->
+		case lists:keyfind(erl_first_files, 1, Conf) of
+			false -> ok;
+			{_, Files} ->
+				Names = [[" ", case lists:reverse(F) of
+					"lre." ++ Elif -> lists:reverse(Elif);
+					Elif -> lists:reverse(Elif)
+				end] || "src/" ++ F <- Files],
+				Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+		end
+	end(),
+	FindFirst = fun(F, Fd) ->
+		case io:parse_erl_form(Fd, undefined) of
+			{ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
+				[PT, F(F, Fd)];
+			{ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
+				case proplists:get_value(parse_transform, CompileOpts) of
+					undefined -> [F(F, Fd)];
+					PT -> [PT, F(F, Fd)]
+				end;
+			{ok, {attribute, _, include, Hrl}, _} ->
+				case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
+					{ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+					_ ->
+						case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
+							{ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+							_ -> [F(F, Fd)]
+						end
+				end;
+			{ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
+				{ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
+				[F(F, HrlFd), F(F, Fd)];
+			{ok, {attribute, _, include_lib, Hrl}, _} ->
+				case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
+					{ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+					_ -> [F(F, Fd)]
+				end;
+			{ok, {attribute, _, import, {Imp, _}}, _} ->
+				case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
+					{ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
+					_ -> [F(F, Fd)]
+				end;
+			{eof, _} ->
+				file:close(Fd),
+				[];
+			_ ->
+				F(F, Fd)
+		end
+	end,
+	fun() ->
+		ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
+		First0 = lists:usort(lists:flatten([begin
+			{ok, Fd} = file:open(F, [read]),
+			FindFirst(FindFirst, Fd)
+		end || F <- ErlFiles])),
+		First = lists:flatten([begin
+			{ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
+			FindFirst(FindFirst, Fd)
+		end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
+		Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
+			lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
+	end(),
+	Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+	Write("\npreprocess::\n"),
+	Write("\npre-deps::\n"),
+	Write("\npre-app::\n"),
+	PatchHook = fun(Cmd) ->
+		case Cmd of
+			"make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+			"gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+			"make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+			"gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+			_ -> Escape(Cmd)
+		end
+	end,
+	fun() ->
+		case lists:keyfind(pre_hooks, 1, Conf) of
+			false -> ok;
+			{_, Hooks} ->
+				[case H of
+					{'get-deps', Cmd} ->
+						Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+					{compile, Cmd} ->
+						Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+					{Regex, compile, Cmd} ->
+						case rebar_utils:is_arch(Regex) of
+							true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+							false -> ok
+						end;
+					_ -> ok
+				end || H <- Hooks]
+		end
+	end(),
+	ShellToMk = fun(V) ->
+		re:replace(re:replace(V, "(\\\\$$)(\\\\w*)", "\\\\1(\\\\2)", [global]),
+			"-Werror\\\\b", "", [{return, list}, global])
+	end,
+	PortSpecs = fun() ->
+		case lists:keyfind(port_specs, 1, Conf) of
+			false ->
+				case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+					false -> [];
+					true ->
+						[{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+							proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+				end;
+			{_, Specs} ->
+				lists:flatten([case S of
+					{Output, Input} -> {ShellToMk(Output), Input, []};
+					{Regex, Output, Input} ->
+						case rebar_utils:is_arch(Regex) of
+							true -> {ShellToMk(Output), Input, []};
+							false -> []
+						end;
+					{Regex, Output, Input, [{env, Env}]} ->
+						case rebar_utils:is_arch(Regex) of
+							true -> {ShellToMk(Output), Input, Env};
+							false -> []
+						end
+				end || S <- Specs])
+		end
+	end(),
+	PortSpecWrite = fun (Text) ->
+		file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+	end,
+	case PortSpecs of
+		[] -> ok;
+		_ ->
+			Write("\npre-app::\n\t$$\(MAKE) -f c_src/Makefile.erlang.mk\n"),
+			PortSpecWrite(io_lib:format("ERL_CFLAGS = -finline-functions -Wall -fPIC -I ~s/erts-~s/include -I ~s\n",
+				[code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+			PortSpecWrite(io_lib:format("ERL_LDFLAGS = -L ~s -lerl_interface -lei\n",
+				[code:lib_dir(erl_interface, lib)])),
+			[PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+			FilterEnv = fun(Env) ->
+				lists:flatten([case E of
+					{_, _} -> E;
+					{Regex, K, V} ->
+						case rebar_utils:is_arch(Regex) of
+							true -> {K, V};
+							false -> []
+						end
+				end || E <- Env])
+			end,
+			MergeEnv = fun(Env) ->
+				lists:foldl(fun ({K, V}, Acc) ->
+					case lists:keyfind(K, 1, Acc) of
+						false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+						{_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+					end
+				end, [], Env)
+			end,
+			PortEnv = case lists:keyfind(port_env, 1, Conf) of
+				false -> [];
+				{_, PortEnv0} -> FilterEnv(PortEnv0)
+			end,
+			PortSpec = fun ({Output, Input0, Env}) ->
+				filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+				Input = [[" ", I] || I <- Input0],
+				PortSpecWrite([
+					[["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+					case $(PLATFORM) of
+						darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+						_ -> ""
+					end,
+					"\n\nall:: ", Output, "\n\n",
+					"%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+					"%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+					"%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+					"%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+					[[Output, ": ", K, " = ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+					Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+						"$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+					"\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+					case filename:extension(Output) of
+						[] -> "\n";
+						_ -> " -shared\n"
+					end])
+			end,
+			[PortSpec(S) || S <- PortSpecs]
+	end,
+	Write("\ninclude $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(DEPS_DIR)/app)/erlang.mk"),
+	RunPlugin = fun(Plugin, Step) ->
+		case erlang:function_exported(Plugin, Step, 2) of
+			false -> ok;
+			true ->
+				c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+				Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+					dict:store(base_dir, "", dict:new())}, undefined),
+				io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+		end
+	end,
+	fun() ->
+		case lists:keyfind(plugins, 1, Conf) of
+			false -> ok;
+			{_, Plugins} ->
+				[begin
+					case lists:keyfind(deps, 1, Conf) of
+						false -> ok;
+						{_, Deps} ->
+							case lists:keyfind(P, 1, Deps) of
+								false -> ok;
+								_ ->
+									Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+									io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+									io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+									code:add_patha(Path ++ "/ebin")
+							end
+					end
+				end || P <- Plugins],
+				[case code:load_file(P) of
+					{module, P} -> ok;
+					_ ->
+						case lists:keyfind(plugin_dir, 1, Conf) of
+							false -> ok;
+							{_, PluginsDir} ->
+								ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+								{ok, P, Bin} = compile:file(ErlFile, [binary]),
+								{module, P} = code:load_binary(P, ErlFile, Bin)
+						end
+				end || P <- Plugins],
+				[RunPlugin(P, preprocess) || P <- Plugins],
+				[RunPlugin(P, pre_compile) || P <- Plugins],
+				[RunPlugin(P, compile) || P <- Plugins]
+		end
+	end(),
+	halt()
+endef
+
+# Erlang script: rewrite ebin/$(1).app so its {modules, ...} entry lists every
+# .erl file found (recursively) under src/, keeping the rest of the app
+# metadata untouched. No-op when the .app file does not exist.
+define dep_autopatch_app.erl
+	UpdateModules = fun(App) ->
+		case filelib:is_regular(App) of
+			false -> ok;
+			true ->
+				{ok, [{application, '$(1)', L0}]} = file:consult(App),
+				Mods = filelib:fold_files("$(call core_native_path,$(DEPS_DIR)/$1/src)", "\\\\.erl$$", true,
+					fun (F, Acc) -> [list_to_atom(filename:rootname(filename:basename(F)))|Acc] end, []),
+				L = lists:keystore(modules, 1, L0, {modules, Mods}),
+				ok = file:write_file(App, io_lib:format("~p.~n", [{application, '$(1)', L}]))
+		end
+	end,
+	UpdateModules("$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"),
+	halt()
+endef
+
+# Erlang script: normalize src/$(1).app.src (creating it from ebin/$(1).app if
+# missing): empty the modules list, replace {vsn, git} with "git", and ensure
+# a {registered, []} entry exists. Deletes the ebin copy when it was the input.
+define dep_autopatch_appsrc.erl
+	AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+	AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+	case filelib:is_regular(AppSrcIn) of
+		false -> ok;
+		true ->
+			{ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+			L1 = lists:keystore(modules, 1, L0, {modules, []}),
+			L2 = case lists:keyfind(vsn, 1, L1) of {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, "git"}); _ -> L1 end,
+			L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+			ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+			case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+	end,
+	halt()
+endef
+
+# Fetch method: git clone (no checkout) then check out the pinned commit.
+define dep_fetch_git
+	git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+	cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+# Fetch method: initialize an existing git submodule in-place.
+define dep_fetch_git-submodule
+	git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+# Fetch method: mercurial clone without working copy, then update to revision.
+define dep_fetch_hg
+	hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+	cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+# Fetch method: subversion checkout (revision selection is part of the URL).
+define dep_fetch_svn
+	svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Fetch method: recursive copy from a local path.
+define dep_fetch_cp
+	cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Erlang script: download the $(1)-$(2) tarball from the hex.pm S3 mirror over
+# HTTPS and unpack its inner contents.tar.gz into DEPS_DIR/$(1).
+define dep_fetch_hex.erl
+	ssl:start(),
+	inets:start(),
+	{ok, {{_, 200, _}, _, Body}} = httpc:request(get,
+		{"https://s3.amazonaws.com/s3.hex.pm/tarballs/$(1)-$(2).tar", []},
+		[], [{body_format, binary}]),
+	{ok, Files} = erl_tar:extract({binary, Body}, [memory]),
+	{_, Source} = lists:keyfind("contents.tar.gz", 1, Files),
+	ok = erl_tar:extract({binary, Source}, [{cwd, "$(call core_native_path,$(DEPS_DIR)/$1)"}, compressed]),
+	halt()
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+# (The version is the second word of the dep_$(1) specification.)
+define dep_fetch_hex
+	$(call erlang,$(call dep_fetch_hex.erl,$(1),$(strip $(word 2,$(dep_$(1))))));
+endef
+
+# Fallback fetch method: report an unknown/invalid dependency and abort the
+# build with a distinctive exit code (78).
+define dep_fetch_fail
+	echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+	exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+# Old format: "dep_name = <repo> [<commit>]"; defaults to the master branch.
+define dep_fetch_legacy
+	$(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+	git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+	cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+# Resolve which fetch method name to use for dependency $(1):
+#   explicit dep_$(1) with a known method -> that method;
+#   explicit dep_$(1) with unknown method -> legacy (when built as a dep) or fail;
+#   otherwise a known package -> its packaged fetch method; else fail.
+define dep_fetch
+	$(if $(dep_$(1)), \
+		$(if $(dep_fetch_$(word 1,$(dep_$(1)))), \
+			$(word 1,$(dep_$(1))), \
+			$(if $(IS_DEP),legacy,fail)), \
+		$(if $(filter $(1),$(PACKAGES)), \
+			$(pkg_$(1)_fetch), \
+			fail))
+endef
+
+# Rule template (instantiated via $(eval) below) that materializes one
+# dependency under DEPS_DIR: refuses name clashes with APPS_DIR, fetches via
+# the resolved dep_fetch_* method, runs autoreconf/configure when autotools
+# files are present, then autopatches — with RabbitMQ-specific special cases
+# (amqp_client / rabbit) that pre-clone the codegen and server repositories
+# instead of the generic dep_autopatch. NO_AUTOPATCH skips patching entirely.
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1):
+	$(eval DEP_NAME := $(call dep_name,$1))
+	$(eval DEP_STR := $(if $(filter-out $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+	$(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+		echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)."; \
+		exit 17; \
+	fi
+	$(verbose) mkdir -p $(DEPS_DIR)
+	$(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
+	$(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
+		echo " AUTO  " $(DEP_STR); \
+		cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+	fi
+	- $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+		echo " CONF  " $(DEP_STR); \
+		cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+	fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+	$(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+			echo " PATCH  Downloading rabbitmq-codegen"; \
+			git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+			echo " PATCH  Downloading rabbitmq-server"; \
+			git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+		fi; \
+		ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+	elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+			echo " PATCH  Downloading rabbitmq-codegen"; \
+			git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi \
+	else \
+		$$(call dep_autopatch,$(DEP_NAME)) \
+	fi
+endif
+endef
+
+# Instantiate one fetch rule per build-time and runtime dependency.
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+# Top-level builds (not sub-app invocations) also clean/distclean every
+# application listed in ALL_APPS_DIRS, propagating failures via exit codes.
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+	$(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+		$(MAKE) -C $$dep clean IS_APP=1 || exit $$?; \
+	done
+
+distclean:: distclean-apps
+
+distclean-apps:
+	$(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+		$(MAKE) -C $$dep distclean IS_APP=1 || exit $$?; \
+	done
+endif
+
+# Unless dependency handling is disabled, distclean removes the whole
+# DEPS_DIR tree.
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+	$(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+# Log-file locations for the recursive dependency listings produced by
+# core/deps-tools.mk (forward-declared here so plugins can reference them).
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/list-deps.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/list-doc-deps.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/list-rel-deps.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/list-test-deps.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/list-shell-deps.log
+
+# External plugins.
+
+# External plugin hook: each entry in DEP_PLUGINS is either "dep/path/to.mk"
+# or a bare dep name (which implies "<dep>/plugins.mk"). The plugin file is
+# -included and made to depend on the dependency being fetched first.
+DEP_PLUGINS ?=
+
+define core_dep_plugin
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endef
+
+$(foreach p,$(DEP_PLUGINS),\
+	$(eval $(if $(findstring /,$p),\
+		$(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+		$(call core_dep_plugin,$p/plugins.mk,$p))))
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+# ErlyDTL template plugin configuration: template search path, module-name
+# suffix, and whether the full path (slashes -> underscores) forms the name.
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_SUFFIX ?= _dtl
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL   " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+# Erlang script: compile each listed .dtl template to ebin/ as module
+# <name>$(DTL_SUFFIX), deriving the module name per DTL_FULL_PATH.
+define erlydtl_compile.erl
+	[begin
+		Module0 = case "$(strip $(DTL_FULL_PATH))" of
+			"" ->
+				filename:basename(F, ".dtl");
+			_ ->
+				"$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+				re:replace(F2, "/",  "_",  [{return, list}, global])
+		end,
+		Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+		case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+			ok -> ok;
+			{ok, _} -> ok
+		end
+	end || F <- string:tokens("$(1)", " ")],
+	halt().
+endef
+
+ifneq ($(wildcard src/),)
+
+DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifdef DTL_FULL_PATH
+BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
+else
+BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES))))
+endif
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
+	@mkdir -p $(ERLANG_MK_TMP)
+	@if test -f $@; then \
+		touch $(DTL_FILES); \
+	fi
+	@touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+# Compile only the templates newer than the .app target ($? prerequisites).
+ebin/$(PROJECT).app:: $(DTL_FILES)
+	$(if $(strip $?),\
+		$(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+# Generate Erlang sources from .proto files via protobuffs, compile them to
+# ebin/, then remove the intermediate .erl files.
+define compile_proto
+	$(verbose) mkdir -p ebin/ include/
+	$(proto_verbose) $(call erlang,$(call compile_proto.erl,$(1)))
+	$(proto_verbose) erlc +debug_info -o ebin/ ebin/*.erl
+	$(verbose) rm ebin/*.erl
+endef
+
+# Erlang script: run protobuffs_compile on each listed .proto file, emitting
+# headers to ../include and sources to ../ebin relative to the proto file.
+define compile_proto.erl
+	[begin
+		Dir = filename:dirname(filename:dirname(F)),
+		protobuffs_compile:generate_source(F,
+			[{output_include_dir, Dir ++ "/include"},
+				{output_src_dir, Dir ++ "/ebin"}])
+	end || F <- string:tokens("$(1)", " ")],
+	halt().
+endef
+
+ifneq ($(wildcard src/),)
+ebin/$(PROJECT).app:: $(sort $(call core_find,src/,*.proto))
+	$(if $(strip $?),$(call compile_proto,$?))
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+# Default compiler flags and compile-ordering/exclusion lists for .erl and
+# .mib sources; projects override these in their own Makefile.
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+	+warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+
+# Per-tool verbosity selectors: V=0 prints a short tag, V=2 echoes commands.
+app_verbose_0 = @echo " APP   " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP   " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC  " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+	$(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL  " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1  " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB   " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+ifeq ($(wildcard ebin/test),)
+app:: deps $(PROJECT).d
+       $(verbose) $(MAKE) --no-print-directory app-build
+else
+app:: clean deps $(PROJECT).d
+       $(verbose) $(MAKE) --no-print-directory app-build
+endif
+
+ifeq ($(wildcard src/$(PROJECT)_app.erl),)
+define app_file
+{application, $(PROJECT), [
+       {description, "$(PROJECT_DESCRIPTION)"},
+       {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+       {id$(comma)$(space)"$(1)"}$(comma))
+       {modules, [$(call comma_list,$(2))]},
+       {registered, []},
+       {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]}
+]}.
+endef
+else
+define app_file
+{application, $(PROJECT), [
+       {description, "$(PROJECT_DESCRIPTION)"},
+       {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+       {id$(comma)$(space)"$(1)"}$(comma))
+       {modules, [$(call comma_list,$(2))]},
+       {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+       {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]},
+       {mod, {$(PROJECT)_app, []}}
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+       $(verbose) :
+
+# Source files.
+
+ERL_FILES = $(sort $(call core_find,src/,*.erl))
+CORE_FILES = $(sort $(call core_find,src/,*.core))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+define compile_asn1
+       $(verbose) mkdir -p include/
+       $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(1)
+       $(verbose) mv asn1/*.erl src/
+       $(verbose) mv asn1/*.hrl include/
+       $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+       $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+       $(verbose) mkdir -p include/ priv/mibs/
+       $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+       $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES = $(sort $(call core_find,src/,*.xrl))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES = $(sort $(call core_find,src/,*.yrl))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+       $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $?)
+
+# Erlang and Core Erlang files.
+
+define makedep.erl
+       ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+       Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
+       Add = fun (Dep, Acc) ->
+               case lists:keyfind(atom_to_list(Dep), 1, Modules) of
+                       {_, DepFile} -> [DepFile|Acc];
+                       false -> Acc
+               end
+       end,
+       AddHd = fun (Dep, Acc) ->
+               case {Dep, lists:keymember(Dep, 2, Modules)} of
+                       {"src/" ++ _, false} -> [Dep|Acc];
+                       {"include/" ++ _, false} -> [Dep|Acc];
+                       _ -> Acc
+               end
+       end,
+       CompileFirst = fun (Deps) ->
+               First0 = [case filename:extension(D) of
+                       ".erl" -> filename:basename(D, ".erl");
+                       _ -> []
+               end || D <- Deps],
+               case lists:usort(First0) of
+                       [] -> [];
+                       [[]] -> [];
+                       First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
+               end
+       end,
+       Depend = [begin
+               case epp:parse_file(F, ["include/"], []) of
+                       {ok, Forms} ->
+                               Deps = lists:usort(lists:foldl(fun
+                                       ({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
+                                       ({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
+                                       ({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
+                                       ({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
+                                       (_, Acc) -> Acc
+                               end, [], Forms)),
+                               case Deps of
+                                       [] -> "";
+                                       _ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
+                               end;
+                       {error, enoent} ->
+                               []
+               end
+       end || F <- ErlFiles],
+       ok = file:write_file("$(1)", Depend),
+       halt()
+endef
+
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+       $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST)
+       @mkdir -p $(ERLANG_MK_TMP)
+       @if test -f $@; then \
+               touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+               touch -c $(PROJECT).d; \
+       fi
+       @touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+
+-include $(PROJECT).d
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+       $(verbose) mkdir -p ebin/
+
+define compile_erl
+       $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+               -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+       $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+       $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+       $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null || true))
+       $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+               $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+       $(app_verbose) printf "$(subst $(newline),\n,$(subst ",\",$(call app_file,$(GITDESCRIBE),$(MODULES))))" \
+               > ebin/$(PROJECT).app
+else
+       $(verbose) if [ -z "$$(grep -E '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+               echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk README for instructions." >&2; \
+               exit 1; \
+       fi
+       $(appsrc_verbose) cat src/$(PROJECT).app.src \
+               | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+               | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(GITDESCRIBE)\"}/" \
+               > ebin/$(PROJECT).app
+endif
+
+clean:: clean-app
+
+clean-app:
+       $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+               $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+               $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+               $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+               $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+       $(verbose) for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+       $(verbose) for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+       $(verbose) for dep in $(ALL_TEST_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir:
+       $(gen_verbose) erlc -v $(TEST_ERLC_OPTS) -I include/ -o $(TEST_DIR) \
+               $(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
+endif
+
+ifeq ($(wildcard ebin/test),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps $(PROJECT).d
+       $(verbose) $(MAKE) --no-print-directory app-build test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+       $(gen_verbose) touch ebin/test
+else
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: deps test-deps $(PROJECT).d
+       $(verbose) $(MAKE) --no-print-directory app-build test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+       $(gen_verbose) rm -f $(TEST_DIR)/*.beam
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/')
+
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+       $(if $(findstring +,$1),\
+               $(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_rebar_config
+{deps, [$(call comma_list,$(foreach d,$(DEPS),\
+       {$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
+{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
+       $(call compat_convert_erlc_opts,$o)))]}.
+endef
+
+$(eval _compat_rebar_config = $$(compat_rebar_config))
+$(eval export _compat_rebar_config)
+
+rebar.config:
+       $(gen_verbose) echo "$${_compat_rebar_config}" > rebar.config
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+
+docs:: asciidoc
+
+asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide:
+       a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+       a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+endif
+
+ifeq ($(wildcard doc/src/manual/*.asciidoc),)
+asciidoc-manual:
+else
+asciidoc-manual:
+       for f in doc/src/manual/*.asciidoc ; do \
+               a2x -v -f manpage $$f ; \
+       done
+       for s in $(MAN_SECTIONS); do \
+               mkdir -p doc/man$$s/ ; \
+               mv doc/src/manual/*.$$s doc/man$$s/ ; \
+               gzip doc/man$$s/*.$$s ; \
+       done
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+       for s in $(MAN_SECTIONS); do \
+               mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
+               install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+       done
+endif
+
+distclean:: distclean-asciidoc
+
+distclean-asciidoc:
+       $(gen_verbose) rm -rf doc/html/ doc/guide.pdf doc/man3/ doc/man7/
+
+# Copyright (c) 2014-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Bootstrap targets:" \
+               "  bootstrap          Generate a skeleton of an OTP application" \
+               "  bootstrap-lib      Generate a skeleton of an OTP library" \
+               "  bootstrap-rel      Generate the files needed to build a release" \
+               "  new-app n=NAME     Create a new local OTP application NAME" \
+               "  new-lib n=NAME     Create a new local OTP library NAME" \
+               "  new t=TPL n=NAME   Generate a module NAME based on the template TPL" \
+               "  new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+               "  list-templates     List available templates"
+
+# Bootstrap templates.
+
+define bs_appsrc
+{application, $p, [
+       {description, ""},
+       {vsn, "0.1.0"},
+       {id, "git"},
+       {modules, []},
+       {registered, []},
+       {applications, [
+               kernel,
+               stdlib
+       ]},
+       {mod, {$p_app, []}},
+       {env, []}
+]}.
+endef
+
+define bs_appsrc_lib
+{application, $p, [
+       {description, ""},
+       {vsn, "0.1.0"},
+       {id, "git"},
+       {modules, []},
+       {registered, []},
+       {applications, [
+               kernel,
+               stdlib
+       ]}
+]}.
+endef
+
+ifdef SP
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+
+include erlang.mk
+endef
+else
+define bs_Makefile
+PROJECT = $p
+include erlang.mk
+endef
+endif
+
+define bs_apps_Makefile
+PROJECT = $p
+include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
+endef
+
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+       $p_sup:start_link().
+
+stop(_State) ->
+       ok.
+endef
+
+define bs_relx_config
+{release, {$p_release, "1"}, [$p]}.
+{extended_start_script, true}.
+{sys_config, "rel/sys.config"}.
+{vm_args, "rel/vm.args"}.
+endef
+
+define bs_sys_config
+[
+].
+endef
+
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+       supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+       Procs = [],
+       {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+       gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+       {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+       {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+       {noreply, State}.
+
+handle_info(_Info, State) ->
+       {noreply, State}.
+
+terminate(_Reason, _State) ->
+       ok.
+
+code_change(_OldVsn, State, _Extra) ->
+       {ok, State}.
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+       {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+       {ok, Req2} = cowboy_req:reply(200, Req),
+       {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+       ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+       gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+       {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+       {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+       {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+       {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+       {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+       {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+       ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+       {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+       {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+       {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+       ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+       {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+       {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+       {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+       {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+       Req2 = cowboy_req:compact(Req),
+       {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+       {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+       {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+       {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+       {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+       ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+       socket :: inet:socket(),
+       transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+       Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+       {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+       ok = ranch:accept_ack(Ref),
+       loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+       loop(State).
+endef
+
+# Plugin-specific targets.
+
+define render_template
+       $(verbose) printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+       $(error Error: src/ directory already exists)
+endif
+       $(eval p := $(PROJECT))
+       $(eval n := $(PROJECT)_sup)
+       $(call render_template,bs_Makefile,Makefile)
+       $(verbose) mkdir src/
+ifdef LEGACY
+       $(call render_template,bs_appsrc,src/$(PROJECT).app.src)
+endif
+       $(call render_template,bs_app,src/$(PROJECT)_app.erl)
+       $(call render_template,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+       $(error Error: src/ directory already exists)
+endif
+       $(eval p := $(PROJECT))
+       $(call render_template,bs_Makefile,Makefile)
+       $(verbose) mkdir src/
+ifdef LEGACY
+       $(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+       $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard rel/),)
+       $(error Error: rel/ directory already exists)
+endif
+       $(eval p := $(PROJECT))
+       $(call render_template,bs_relx_config,relx.config)
+       $(verbose) mkdir rel/
+       $(call render_template,bs_sys_config,rel/sys.config)
+       $(call render_template,bs_vm_args,rel/vm.args)
+
+new-app:
+ifndef in
+       $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+       $(error Error: Application $in already exists)
+endif
+       $(eval p := $(in))
+       $(eval n := $(in)_sup)
+       $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+       $(call render_template,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+       $(call render_template,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+       $(call render_template,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+       $(call render_template,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+       $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+       $(error Error: Application $in already exists)
+endif
+       $(eval p := $(in))
+       $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+       $(call render_template,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+       $(call render_template,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+       $(error Error: src/ directory does not exist)
+endif
+ifndef t
+       $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef tpl_$(t)
+       $(error Unknown template)
+endif
+ifndef n
+       $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+       $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new t=$t n=$n in=
+else
+       $(call render_template,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+       $(verbose) echo Available templates: $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),darwin)
+       CC ?= cc
+       CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
+       CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
+       LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+       CC ?= cc
+       CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+       CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+       CC ?= gcc
+       CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+       CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+
+LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C     " $(?F);
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP   " $(?F);
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD    " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+       $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+       $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+
+$(C_SRC_OUTPUT): $(OBJECTS)
+       $(verbose) mkdir -p priv/
+       $(link_verbose) $(CC) $(OBJECTS) \
+               $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+               -o $(C_SRC_OUTPUT)
+
+%.o: %.c
+       $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+       $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+       $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+       $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+       $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+$(C_SRC_ENV):
+       $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+               io_lib:format( \
+                       \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+                       \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+                       \"ERL_INTERFACE_LIB_DIR ?= ~s~n\", \
+                       [code:root_dir(), erlang:system_info(version), \
+                       code:lib_dir(erl_interface, include), \
+                       code:lib_dir(erl_interface, lib)])), \
+               halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+       $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+       /* Initialize private data. */
+       *priv_data = NULL;
+
+       loads++;
+
+       return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+       /* Convert the private data to the new version. */
+       *priv_data = *old_priv_data;
+
+       loads++;
+
+       return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+       if (loads == 1) {
+               /* Destroy the private data. */
+       }
+
+       loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+       if (enif_is_atom(env, argv[0])) {
+               return enif_make_tuple2(env,
+                       enif_make_atom(env, "hello"),
+                       argv[0]);
+       }
+
+       return enif_make_tuple2(env,
+               enif_make_atom(env, "error"),
+               enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+       {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+       PrivDir = case code:priv_dir(?MODULE) of
+               {error, _} ->
+                       AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+                       filename:join(AppPath, "priv");
+               Path ->
+                       Path
+       end,
+       erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+       erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+       $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+       $(error Error: src/$n.erl already exists)
+endif
+ifdef in
+       $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+       $(verbose) mkdir -p $(C_SRC_DIR) src/
+       $(call render_template,bs_c_nif,$(C_SRC_DIR)/$n.c)
+       $(call render_template,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-setup distclean-kerl
+
+KERL ?= $(CURDIR)/kerl
+export KERL
+
+KERL_URL ?= https://raw.githubusercontent.com/yrashk/kerl/master/kerl
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+CI_INSTALL_DIR ?= $(HOME)/erlang
+CI_OTP ?=
+
+ifeq ($(strip $(CI_OTP)),)
+ci::
+else
+ci:: $(addprefix ci-,$(CI_OTP))
+
+ci-prepare: $(addprefix $(CI_INSTALL_DIR)/,$(CI_OTP))
+
+ci-setup::
+
+ci_verbose_0 = @echo " CI    " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$(1): $(CI_INSTALL_DIR)/$(1)
+       $(ci_verbose) \
+               PATH="$(CI_INSTALL_DIR)/$(1)/bin:$(PATH)" \
+               CI_OTP_RELEASE="$(1)" \
+               CT_OPTS="-label $(1)" \
+               $(MAKE) clean ci-setup tests
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp))))
+
+define ci_otp_target
+ifeq ($(wildcard $(CI_INSTALL_DIR)/$(1)),)
+$(CI_INSTALL_DIR)/$(1): $(KERL)
+       $(KERL) build git $(OTP_GIT) $(1) $(1)
+       $(KERL) install $(1) $(CI_INSTALL_DIR)/$(1)
+endif
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_otp_target,$(otp))))
+
+$(KERL):
+       $(gen_verbose) $(call core_http_get,$(KERL),$(KERL_URL))
+       $(verbose) chmod +x $(KERL)
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Continuous Integration targets:" \
+               "  ci          Run '$(MAKE) tests' on all configured Erlang versions." \
+               "" \
+               "The CI_OTP variable must be defined with the Erlang versions" \
+               "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+       $(gen_verbose) rm -rf $(KERL)
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+ifneq ($(wildcard $(TEST_DIR)),)
+       CT_SUITES ?= $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+else
+       CT_SUITES ?=
+endif
+
+# Core targets.
+
+tests:: ct
+
+distclean:: distclean-ct
+
+# Help text for the common_test plugin. Grammar fix: "can be ran" -> "can be run".
+help::
+	$(verbose) printf "%s\n" "" \
+		"Common_test targets:" \
+		"  ct          Run all the common_test suites for this project" \
+		"" \
+		"All your common_test suites have their associated targets." \
+		"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+       -no_auto_compile \
+       -noinput \
+       -pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(TEST_DIR) \
+       -dir $(TEST_DIR) \
+       -logdir $(CURDIR)/logs
+
+ifeq ($(CT_SUITES),)
+ct:
+else
+ct: test-build
+       $(verbose) mkdir -p $(CURDIR)/logs/
+       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+       $(verbose) mkdir -p $(CURDIR)/logs/
+       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+       $(gen_verbose) rm -rf $(CURDIR)/logs/
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r src
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
+       -Wunmatched_returns # -Wunderspecs
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Dialyzer targets:" \
+               "  plt         Build a PLT file for this project" \
+               "  dialyze     Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+$(DIALYZER_PLT): deps app
+       $(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+       $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze:
+else
+dialyze: $(DIALYZER_PLT)
+endif
+       $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+
+# Core targets.
+
+docs:: distclean-edoc edoc
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: doc-deps
+       $(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().'
+
+distclean-edoc:
+       $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info
+
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: elvis distclean-elvis
+
+# Configuration.
+
+ELVIS_CONFIG ?= $(CURDIR)/elvis.config
+
+ELVIS ?= $(CURDIR)/elvis
+export ELVIS
+
+ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
+ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
+ELVIS_OPTS ?=
+
+# Core targets.
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Elvis targets:" \
+               "  elvis       Run Elvis using the local elvis.config or download the default otherwise"
+
+distclean:: distclean-elvis
+
+# Plugin-specific targets.
+
+$(ELVIS):
+       $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
+       $(verbose) chmod +x $(ELVIS)
+
+$(ELVIS_CONFIG):
+       $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
+
+elvis: $(ELVIS) $(ELVIS_CONFIG)
+       $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
+
+distclean-elvis:
+       $(gen_verbose) rm -rf $(ELVIS)
+
+# Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+
+ESCRIPT_BEAMS ?= "ebin/*", "deps/*/ebin/*"
+ESCRIPT_SYS_CONFIG ?= "rel/sys.config"
+ESCRIPT_EMU_ARGS ?= -pa . \
+       -sasl errlog_type error \
+       -escript main $(ESCRIPT_NAME)
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_STATIC ?= "deps/*/priv/**", "priv/**"
+
+# Core targets.
+
+distclean:: distclean-escript
+
+# Help text for the escript plugin. The original ended the last printf
+# argument with a stray " \" continuation, which joined the recipe with
+# the following (blank) line; the trailing backslash is removed here.
+help::
+	$(verbose) printf "%s\n" "" \
+		"Escript targets:" \
+		"  escript     Build an executable escript archive"
+
+# Plugin-specific targets.
+
+# Based on https://github.com/synrc/mad/blob/master/src/mad_bundle.erl
+# Copyright (c) 2013 Maxim Sokhatsky, Synrc Research Center
+# Modified MIT License, https://github.com/synrc/mad/blob/master/LICENSE :
+# Software may only be used for the great good and the true happiness of all
+# sentient beings.
+
+define ESCRIPT_RAW
+'Read = fun(F) -> {ok, B} = file:read_file(filename:absname(F)), B end,'\
+'Files = fun(L) -> A = lists:concat([filelib:wildcard(X)||X<- L ]),'\
+'  [F || F <- A, not filelib:is_dir(F) ] end,'\
+'Squash = fun(L) -> [{filename:basename(F), Read(F) } || F <- L ] end,'\
+'Zip = fun(A, L) -> {ok,{_,Z}} = zip:create(A, L, [{compress,all},memory]), Z end,'\
+'Ez = fun(Escript) ->'\
+'  Static = Files([$(ESCRIPT_STATIC)]),'\
+'  Beams = Squash(Files([$(ESCRIPT_BEAMS), $(ESCRIPT_SYS_CONFIG)])),'\
+'  Archive = Beams ++ [{ "static.gz", Zip("static.gz", Static)}],'\
+'  escript:create(Escript, [ $(ESCRIPT_OPTIONS)'\
+'    {archive, Archive, [memory]},'\
+'    {shebang, "$(ESCRIPT_SHEBANG)"},'\
+'    {comment, "$(ESCRIPT_COMMENT)"},'\
+'    {emu_args, " $(ESCRIPT_EMU_ARGS)"}'\
+'  ]),'\
+'  file:change_mode(Escript, 8#755)'\
+'end,'\
+'Ez("$(ESCRIPT_NAME)"),'\
+'halt().'
+endef
+
+ESCRIPT_COMMAND = $(subst ' ',,$(ESCRIPT_RAW))
+
+escript:: distclean-escript deps app
+       $(gen_verbose) $(ERL) -eval $(ESCRIPT_COMMAND)
+
+distclean-escript:
+       $(gen_verbose) rm -f $(ESCRIPT_NAME)
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel distclean-relx-rel distclean-relx run
+
+# Configuration.
+
+RELX ?= $(CURDIR)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://github.com/erlware/relx/releases/download/v3.5.0/relx
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+       RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+       RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+endif
+endif
+
+distclean:: distclean-relx-rel distclean-relx
+
+# Plugin-specific targets.
+
+$(RELX):
+       $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+       $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+       $(verbose) $(RELX) -c $(RELX_CONFIG) $(RELX_OPTS)
+
+distclean-relx-rel:
+       $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+distclean-relx:
+       $(gen_verbose) rm -rf $(RELX)
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run:
+else
+
+define get_relx_release.erl
+       {ok, Config} = file:consult("$(RELX_CONFIG)"),
+       {release, {Name, _}, _} = lists:keyfind(release, 1, Config),
+       io:format("~s", [Name]),
+       halt(0).
+endef
+
+RELX_RELEASE = `$(call erlang,$(get_relx_release.erl))`
+
+run: all
+       $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_RELEASE)/bin/$(RELX_RELEASE) console
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Relx targets:" \
+               "  run         Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(APPS_DIR)/*/ebin $(DEPS_DIR)/*/ebin
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Shell targets:" \
+               "  shell       Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+       $(verbose) for dep in $(ALL_SHELL_DEPS_DIRS) ; do $(MAKE) -C $$dep ; done
+
+shell: build-shell-deps
+       $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+       code:add_pathsa(["$(CURDIR)/ebin", "$(DEPS_DIR)/*/ebin"]),
+       try
+               case $(1) of
+                       all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+                       module -> triq:check($(2));
+                       function -> triq:check($(2))
+               end
+       of
+               true -> halt(0);
+               _ -> halt(1)
+       catch error:undef ->
+               io:format("Undefined property or module~n"),
+               halt(0)
+       end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build
+       $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build
+       $(verbose) echo Testing $(t)/0
+       $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build
+       $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename $(wildcard ebin/*.beam))))))
+       $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+       XREF_ARGS :=
+else
+       XREF_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/0.2.2/xrefr
+
+# Core targets.
+
+# Help text for the xref plugin. The original wrote "$XREF_CONFIG" inside
+# a make recipe: make expands the single-character variable reference $X
+# (undefined), so the displayed help read "...using REF_CONFIG as...".
+# Refer to the variable by name, consistent with the CI help text above.
+help::
+	$(verbose) printf "%s\n" "" \
+		"Xref targets:" \
+		"  xref        Run Xrefr using XREF_CONFIG as config file if defined"
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+       $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+       $(verbose) chmod +x $(XREFR)
+
+# Run xrefr. The configuration block above computes XREF_ARGS (either empty
+# or "-c $(XREF_CONFIG)"), but the original recipe passed the never-defined
+# $(XREFR_ARGS), so a user-supplied XREF_CONFIG was silently ignored.
+xref: deps app $(XREFR)
+	$(gen_verbose) $(XREFR) $(XREF_ARGS)
+
+distclean-xref:
+       $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR = cover
+
+# Hook in coverage to ct
+
+ifdef COVER
+ifdef CT_RUN
+# All modules in 'ebin'
+COVER_MODS = $(notdir $(basename $(call core_ls,ebin/*.beam)))
+
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec:
+       $(verbose) echo Cover mods: $(COVER_MODS)
+       $(gen_verbose) printf "%s\n" \
+               '{incl_mods,[$(subst $(space),$(comma),$(COVER_MODS))]}.' \
+               '{export,"$(CURDIR)/ct.coverdata"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+       $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+       $(verbose) printf "%s\n" "" \
+               "Cover targets:" \
+               "  cover-report  Generate a HTML coverage report from previously collected" \
+               "                cover data." \
+               "  all.coverdata Merge {eunit,ct}.coverdata into one coverdata file." \
+               "" \
+               "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+               "target tests additionally generates a HTML coverage report from the combined" \
+               "coverdata files from each of these testing tools. HTML reports can be disabled" \
+               "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out all.coverdata,$(wildcard *.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+       $(gen_verbose) rm -f *.coverdata ct.cover.spec
+
+# Merge all coverdata files into one.
+all.coverdata: $(COVERDATA)
+       $(gen_verbose) $(ERL) -eval ' \
+               $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),) \
+               cover:export("$@"), halt(0).'
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+       $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+       grep -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+       | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+       $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+       Ms = cover:imported_modules(),
+       [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+               ++ ".COVER.html", [html])  || M <- Ms],
+       Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+       EunitHrlMods = [$(EUNIT_HRL_MODS)],
+       Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+               true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+       TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+       TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+       TotalPerc = round(100 * TotalY / (TotalY + TotalN)),
+       {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+       io:format(F, "<!DOCTYPE html><html>~n"
+               "<head><meta charset=\"UTF-8\">~n"
+               "<title>Coverage report</title></head>~n"
+               "<body>~n", []),
+       io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+       io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+       [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+               "<td>~p%</td></tr>~n",
+               [M, M, round(100 * Y / (Y + N))]) || {M, {Y, N}} <- Report1],
+       How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+       Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+       io:format(F, "</table>~n"
+               "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+               "</body></html>", [How, Date]),
+       halt().
+endef
+
+cover-report:
+       $(gen_verbose) mkdir -p $(COVER_REPORT_DIR)
+       $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+       fetch-shell-deps
+
+ifneq ($(SKIP_DEPS),)
+fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps fetch-shell-deps:
+       @:
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+fetch-deps: $(ALL_DEPS_DIRS)
+fetch-doc-deps: $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+fetch-rel-deps: $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+fetch-test-deps: $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+fetch-shell-deps: $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+fetch-deps: $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+fetch-deps: $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+fetch-deps: $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+fetch-deps: $(ALL_SHELL_DEPS_DIRS)
+endif
+
+fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps fetch-shell-deps:
+ifndef IS_APP
+       $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+               $(MAKE) -C $$dep $@ IS_APP=1 || exit $$?; \
+       done
+endif
+ifneq ($(IS_DEP),1)
+       $(verbose) rm -f $(ERLANG_MK_TMP)/$@.log
+endif
+       $(verbose) mkdir -p $(ERLANG_MK_TMP)
+       $(verbose) for dep in $^ ; do \
+               if ! grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/$@.log; then \
+                       echo $$dep >> $(ERLANG_MK_TMP)/$@.log; \
+                       if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk)$$" \
+                        $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+                               $(MAKE) -C $$dep fetch-deps IS_DEP=1 || exit $$?; \
+                       fi \
+               fi \
+       done
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+       list-shell-deps
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+       $(verbose) :> $@
+else
+LIST_DIRS = $(ALL_DEPS_DIRS)
+LIST_DEPS = $(BUILD_DEPS) $(DEPS)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): fetch-deps
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): LIST_DIRS += $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): LIST_DEPS += $(DOC_DEPS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): fetch-doc-deps
+else
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): LIST_DIRS += $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): LIST_DEPS += $(REL_DEPS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): fetch-rel-deps
+else
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): LIST_DIRS += $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): LIST_DEPS += $(TEST_DEPS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): fetch-test-deps
+else
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): LIST_DIRS += $(ALL_SHELL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): LIST_DEPS += $(SHELL_DEPS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): fetch-shell-deps
+else
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): fetch-deps
+endif
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ifneq ($(IS_DEP),1)
+       $(verbose) rm -f $@.orig
+endif
+ifndef IS_APP
+       $(verbose) for app in $(filter-out $(CURDIR),$(ALL_APPS_DIRS)); do \
+               $(MAKE) -C "$$app" --no-print-directory $@ IS_APP=1 || :; \
+       done
+endif
+       $(verbose) for dep in $(filter-out $(CURDIR),$(LIST_DIRS)); do \
+               if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk)$$" \
+                $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+                       $(MAKE) -C "$$dep" --no-print-directory $@ IS_DEP=1; \
+               fi; \
+       done
+       $(verbose) for dep in $(LIST_DEPS); do \
+               echo $(DEPS_DIR)/$$dep; \
+       done >> $@.orig
+ifndef IS_APP
+ifneq ($(IS_DEP),1)
+       $(verbose) sort < $@.orig | uniq > $@
+       $(verbose) rm -f $@.orig
+endif
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+ifneq ($(SKIP_DEPS),)
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+       @:
+else
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(IS_DEP),1)
+ifneq ($(filter doc,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+endif
+endif
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+       $(verbose) cat $^ | sort | uniq
+endif # ifneq ($(SKIP_DEPS),)
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/rabbitmq-components.mk b/rabbitmq-server/deps/rabbitmq_trust_store/rabbitmq-components.mk
new file mode 100644 (file)
index 0000000..eb9e9e3
--- /dev/null
@@ -0,0 +1,345 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# Automatically add rabbitmq-common to the dependencies, at least for
+# the Makefiles.
+ifneq ($(PROJECT),rabbit_common)
+ifneq ($(PROJECT),rabbitmq_public_umbrella)
+ifeq ($(filter rabbit_common,$(DEPS)),)
+DEPS += rabbit_common
+endif
+endif
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+
+dep_amqp_client                       = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit                            = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common                     = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0                  = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp        = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http        = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap        = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl       = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser    = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_clusterer                = git_rmq rabbitmq-clusterer $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen                  = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client            = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange      = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes        = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management        = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
+dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella          = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# FIXME: As of 2015-11-20, we depend on Ranch 1.2.1, but erlang.mk
+# defaults to Ranch 1.1.0. All projects depending indirectly on Ranch
+# need to add "ranch" as a BUILD_DEPS. The list of projects needing
+# this workaround are:
+#     o  rabbitmq-web-stomp
+dep_ranch = git https://github.com/ninenines/ranch 1.2.1
+
+RABBITMQ_COMPONENTS = amqp_client \
+                     rabbit \
+                     rabbit_common \
+                     rabbitmq_amqp1_0 \
+                     rabbitmq_auth_backend_amqp \
+                     rabbitmq_auth_backend_http \
+                     rabbitmq_auth_backend_ldap \
+                     rabbitmq_auth_mechanism_ssl \
+                     rabbitmq_boot_steps_visualiser \
+                     rabbitmq_clusterer \
+                     rabbitmq_codegen \
+                     rabbitmq_consistent_hash_exchange \
+                     rabbitmq_delayed_message_exchange \
+                     rabbitmq_dotnet_client \
+                     rabbitmq_event_exchange \
+                     rabbitmq_federation \
+                     rabbitmq_federation_management \
+                     rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
+                     rabbitmq_lvc \
+                     rabbitmq_management \
+                     rabbitmq_management_agent \
+                     rabbitmq_management_exchange \
+                     rabbitmq_management_themes \
+                     rabbitmq_management_visualiser \
+                     rabbitmq_message_timestamp \
+                     rabbitmq_metronome \
+                     rabbitmq_mqtt \
+                     rabbitmq_objc_client \
+                     rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
+                     rabbitmq_rtopic_exchange \
+                     rabbitmq_sharding \
+                     rabbitmq_shovel \
+                     rabbitmq_shovel_management \
+                     rabbitmq_stomp \
+                     rabbitmq_test \
+                     rabbitmq_toke \
+                     rabbitmq_top \
+                     rabbitmq_tracing \
+                     rabbitmq_trust_store \
+                     rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
+                     rabbitmq_web_stomp \
+                     rabbitmq_web_stomp_examples \
+                     rabbitmq_website
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+       ref=$$(git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+       if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+base_rmq_ref := $(shell \
+       (git rev-parse --verify -q stable >/dev/null && \
+         git merge-base --is-ancestor $$(git merge-base master HEAD) stable && \
+         echo stable) || \
+       echo master)
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+#   - We take the "origin" remote URL as the base
+#   - The current project name and repository name are replaced by the
+#     target's properties:
+#       eg. rabbitmq-common is replaced by rabbitmq-codegen
+#       eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+#   1. /foo.git -> /bar.git
+#   2. /foo     -> /bar
+#   3. /foo/    -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)),                                    \
+                 $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))),       \
+                 $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+       fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+       fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+       if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+        git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+           fetch_url="$$$$fetch_url1"; \
+           push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+       elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+           fetch_url="$$$$fetch_url2"; \
+           push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+       fi; \
+       cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+       $(foreach ref,$(call dep_rmq_commits,$(1)), \
+         git checkout -q $(ref) >/dev/null 2>&1 || \
+         ) \
+       (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+         1>&2 && false) ) && \
+       (test "$$$$fetch_url" = "$$$$push_url" || \
+        git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+       @:
+
+prepare-dist::
+       @:
+
+# --------------------------------------------------------------------
+# Run a RabbitMQ node (moved from rabbitmq-run.mk as a workaround).
+# --------------------------------------------------------------------
+
+# Add "rabbit" to the build dependencies when the user wants to start
+# a broker or to the test dependencies when the user wants to test a
+# project.
+#
+# NOTE: This should belong to rabbitmq-run.mk. Unfortunately, it is
+# loaded *after* erlang.mk which is too late to add a dependency. That's
+# why rabbitmq-components.mk knows the list of targets which start a
+# broker and add "rabbit" to the dependencies in this case.
+
+ifneq ($(PROJECT),rabbit)
+ifeq ($(filter rabbit,$(DEPS) $(BUILD_DEPS)),)
+RUN_RMQ_TARGETS = run-broker \
+                 run-background-broker \
+                 run-node \
+                 run-background-node \
+                 start-background-node
+
+ifneq ($(filter $(RUN_RMQ_TARGETS),$(MAKECMDGOALS)),)
+BUILD_DEPS += rabbit
+endif
+endif
+
+ifeq ($(filter rabbit,$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)),)
+ifneq ($(filter check tests tests-with-broker test,$(MAKECMDGOALS)),)
+TEST_DEPS += rabbit
+endif
+endif
+endif
+
+ifeq ($(filter rabbit_public_umbrella amqp_client rabbit_common rabbitmq_test,$(PROJECT)),)
+ifeq ($(filter rabbitmq_test,$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)),)
+TEST_DEPS += rabbitmq_test
+endif
+endif
+
+# --------------------------------------------------------------------
+# rabbitmq-components.mk checks.
+# --------------------------------------------------------------------
+
+ifeq ($(PROJECT),rabbit_common)
+else ifdef SKIP_RMQCOMP_CHECK
+else ifeq ($(IS_DEP),1)
+else ifneq ($(filter co up,$(MAKECMDGOALS)),)
+else
+# In all other cases, rabbitmq-components.mk must be in sync.
+deps:: check-rabbitmq-components.mk
+fetch-deps: check-rabbitmq-components.mk
+endif
+
+# If this project is under the Umbrella project, we override $(DEPS_DIR)
+# to point to the Umbrella's one. We also disable `make distclean` so
+# $(DEPS_DIR) is not accidentally removed.
+
+ifneq ($(wildcard ../../UMBRELLA.md),)
+UNDER_UMBRELLA = 1
+else ifneq ($(wildcard UMBRELLA.md),)
+UNDER_UMBRELLA = 1
+endif
+
+ifeq ($(UNDER_UMBRELLA),1)
+ifneq ($(PROJECT),rabbitmq_public_umbrella)
+DEPS_DIR ?= $(abspath ..)
+
+distclean:: distclean-components
+       @:
+
+distclean-components:
+endif
+
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
+
+UPSTREAM_RMQ_COMPONENTS_MK = $(DEPS_DIR)/rabbit_common/mk/rabbitmq-components.mk
+
+check-rabbitmq-components.mk:
+       $(verbose) cmp -s rabbitmq-components.mk \
+               $(UPSTREAM_RMQ_COMPONENTS_MK) || \
+               (echo "error: rabbitmq-components.mk must be updated!" 1>&2; \
+                 false)
+
+ifeq ($(PROJECT),rabbit_common)
+rabbitmq-components-mk:
+       @:
+else
+rabbitmq-components-mk:
+       $(gen_verbose) cp -a $(UPSTREAM_RMQ_COMPONENTS_MK) .
+ifeq ($(DO_COMMIT),yes)
+       $(verbose) git diff --quiet rabbitmq-components.mk \
+       || git commit -m 'Update rabbitmq-components.mk' rabbitmq-components.mk
+endif
+endif
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/src/rabbit_trust_store.erl b/rabbitmq-server/deps/rabbitmq_trust_store/src/rabbit_trust_store.erl
new file mode 100644 (file)
index 0000000..d10c857
--- /dev/null
@@ -0,0 +1,282 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_trust_store).
+-behaviour(gen_server).
+
+-export([mode/0, refresh/0, list/0]). %% Console Interface.
+-export([whitelisted/3, is_whitelisted/1]). %% Client-side Interface.
+-export([start/1, start_link/1]).
+-export([init/1, terminate/2,
+         handle_call/3, handle_cast/2,
+         handle_info/2,
+         code_change/3]).
+
+-include_lib("kernel/include/file.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("public_key/include/public_key.hrl").
+
+-type certificate() :: #'OTPCertificate'{}.
+-type event()       :: valid_peer
+                     | valid
+                     | {bad_cert, Other :: atom()
+                                | unknown_ca
+                                | selfsigned_peer}
+                     | {extension, #'Extension'{}}.
+-type state()       :: confirmed | continue.
+-type outcome()     :: {valid, state()}
+                     | {fail, Reason :: term()}
+                     | {unknown, state()}.
+
+-record(entry, {filename :: string(), identifier :: tuple(), change_time :: integer()}).
+-record(state, {directory_change_time :: integer(), whitelist_directory :: string(), refresh_interval :: integer()}).
+
+
+%% OTP Supervision
+
+start(Settings) ->
+    gen_server:start(?MODULE, Settings, []).
+
+start_link(Settings) ->
+    gen_server:start_link({local, trust_store}, ?MODULE, Settings, []).
+
+
+%% Console Interface
+
+-spec mode() -> 'automatic' | 'manual'.
+mode() ->
+    gen_server:call(trust_store, mode).
+
+-spec refresh() -> integer().
+refresh() ->
+    gen_server:call(trust_store, refresh).
+
+-spec list() -> string().
+list() ->
+    gen_server:call(trust_store, list).
+
+%% Client (SSL Socket) Interface
+
+-spec whitelisted(certificate(), event(), state()) -> outcome().
+whitelisted(_, {bad_cert, unknown_ca}, confirmed) ->
+    {valid, confirmed};
+whitelisted(#'OTPCertificate'{}=C, {bad_cert, unknown_ca}, continue) ->
+    case is_whitelisted(C) of
+        true ->
+            {valid, confirmed};
+        false ->
+            {fail, "CA not known AND certificate not whitelisted"}
+    end;
+whitelisted(#'OTPCertificate'{}=C, {bad_cert, selfsigned_peer}, continue) ->
+    case is_whitelisted(C) of
+        true ->
+            {valid, confirmed};
+        false ->
+            {fail, "certificate not whitelisted"}
+    end;
+whitelisted(_, {bad_cert, _} = Reason, _) ->
+    {fail, Reason};
+whitelisted(_, valid, St) ->
+    {valid, St};
+whitelisted(#'OTPCertificate'{}=_, valid_peer, St) ->
+    {valid, St};
+whitelisted(_, {extension, _}, St) ->
+    {unknown, St}.
+
+-spec is_whitelisted(certificate()) -> boolean().
+is_whitelisted(#'OTPCertificate'{}=C) ->
+    #entry{identifier = Id} = extract_unique_attributes(C),
+    ets:member(table_name(), Id).
+
+
+%% Generic Server Callbacks
+
+init(Settings) ->
+    erlang:process_flag(trap_exit, true),
+    ets:new(table_name(), table_options()),
+    Path = path(Settings),
+    Interval = refresh_interval(Settings),
+    Initial = modification_time(Path),
+    tabulate(Path),
+    if
+        Interval =:= 0 ->
+            ok;
+        Interval  >  0 ->
+            erlang:send_after(Interval, erlang:self(), refresh)
+    end,
+    {ok,
+     #state{directory_change_time = Initial,
+      whitelist_directory = Path,
+      refresh_interval = Interval}}.
+
+handle_call(mode, _, St) ->
+    {reply, mode(St), St};
+handle_call(refresh, _, St) ->
+    {reply, refresh(St), St};
+handle_call(list, _, St) ->
+    {reply, list(St), St};
+handle_call(_, _, St) ->
+    {noreply, St}.
+
+handle_cast(_, St) ->
+    {noreply, St}.
+
+handle_info(refresh, #state{refresh_interval = Interval} = St) ->
+    New = refresh(St),
+    erlang:send_after(Interval, erlang:self(), refresh),
+    {noreply, St#state{directory_change_time = New}};
+handle_info(_, St) ->
+    {noreply, St}.
+
+terminate(shutdown, _St) ->
+    true = ets:delete(table_name()).
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+
+%% Ancillary & Constants
+
+list(#state{whitelist_directory = Path}) ->
+    Formatted =
+        [format_cert(Path, F, S) ||
+         #entry{filename = F, identifier = {_, S}} <- ets:tab2list(table_name())],
+    to_big_string(Formatted).
+
+mode(#state{refresh_interval = I}) ->
+    if
+        I =:= 0 -> 'manual';
+        I  >  0 -> 'automatic'
+    end.
+
+refresh(#state{whitelist_directory = Path, directory_change_time = Old}) ->
+    New = modification_time(Path),
+    case New > Old of
+        false ->
+            ok;
+        true  ->
+            tabulate(Path)
+    end,
+    New.
+
+refresh_interval(Pairs) ->
+    {refresh_interval, S} = lists:keyfind(refresh_interval, 1, Pairs),
+    timer:seconds(S).
+
+path(Pairs) ->
+    {directory, Path} = lists:keyfind(directory, 1, Pairs),
+    Path.
+
+table_name() ->
+    trust_store_whitelist.
+
+table_options() ->
+    [protected,
+     named_table,
+     set,
+     {keypos, #entry.identifier},
+     {heir, none}].
+
+modification_time(Path) ->
+    {ok, Info} = file:read_file_info(Path, [{time, posix}]),
+    Info#file_info.mtime.
+
+already_whitelisted_filenames() ->
+    ets:select(table_name(),
+        ets:fun2ms(fun (#entry{filename = N, change_time = T}) -> {N, T} end)).
+
+one_whitelisted_filename({Name, Time}) ->
+    ets:fun2ms(fun (#entry{filename = N, change_time = T}) when N =:= Name, T =:= Time -> true end).
+
+build_entry(Path, {Name, Time}) ->
+    Absolute    = filename:join(Path, Name),
+    Certificate = scan_then_parse(Absolute),
+    Unique      = extract_unique_attributes(Certificate),
+    Unique#entry{filename = Name, change_time = Time}.
+
+try_build_entry(Path, {Name, Time}) ->
+    try build_entry(Path, {Name, Time}) of
+        Entry ->
+            rabbit_log:info(
+              "trust store: loading certificate '~s'", [Name]),
+            {ok, Entry}
+    catch
+        _:Err ->
+            rabbit_log:error(
+              "trust store: failed to load certificate '~s', error: ~p",
+              [Name, Err]),
+            {error, Err}
+    end.
+
+do_insertions(Before, After, Path) ->
+    Entries = [try_build_entry(Path, NameTime) ||
+                       NameTime <- (After -- Before)],
+    [insert(Entry) || {ok, Entry} <- Entries].
+
+do_removals(Before, After) ->
+    [delete(NameTime) || NameTime <- (Before -- After)].
+
+get_new(Path) ->
+    {ok, New} = file:list_dir(Path),
+    [{X, modification_time(filename:absname(X, Path))} || X <- New].
+
+tabulate(Path) ->
+    Old = already_whitelisted_filenames(),
+    New = get_new(Path),
+    do_insertions(Old, New, Path),
+    do_removals(Old, New),
+    ok.
+
+delete({Name, Time}) ->
+    rabbit_log:info("removing certificate '~s'", [Name]),
+    ets:select_delete(table_name(), one_whitelisted_filename({Name, Time})).
+
+insert(Entry) ->
+    true = ets:insert(table_name(), Entry).
+
+scan_then_parse(Filename) when is_list(Filename) ->
+    {ok, Bin} = file:read_file(Filename),
+    [{'Certificate', Data, not_encrypted}] = public_key:pem_decode(Bin),
+    public_key:pkix_decode_cert(Data, otp).
+
+extract_unique_attributes(#'OTPCertificate'{}=C) ->
+    {Serial, Issuer} = case public_key:pkix_issuer_id(C, other) of
+        {error, _Reason} ->
+            {ok, Identifier} = public_key:pkix_issuer_id(C, self),
+            Identifier;
+        {ok, Identifier} ->
+            Identifier
+    end,
+    %% Why change the order of attributes? For the same reason we put
+    %% the *most significant figure* first (on the left hand side).
+    #entry{identifier = {Issuer, Serial}}.
+
+to_big_string(Formatted) ->
+    string:join([cert_to_string(X) || X <- Formatted], "~n~n").
+
+cert_to_string({Name, Serial, Subject, Issuer, Validity}) ->
+    Text =
+        io_lib:format("Name: ~s~nSerial: ~p | 0x~.16.0B~nSubject: ~s~nIssuer: ~s~nValidity: ~p~n",
+                     [ Name, Serial, Serial, Subject, Issuer, Validity]),
+    lists:flatten(Text).
+
+format_cert(Path, Name, Serial) ->
+    {ok, Bin} = file:read_file(filename:join(Path, Name)),
+    [{'Certificate', Data, not_encrypted}] = public_key:pem_decode(Bin),
+    Validity = rabbit_ssl:peer_cert_validity(Data),
+    Subject = rabbit_ssl:peer_cert_subject(Data),
+    Issuer = rabbit_ssl:peer_cert_issuer(Data),
+    {Name, Serial, Subject, Issuer, Validity}.
+
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl b/rabbitmq-server/deps/rabbitmq_trust_store/src/rabbit_trust_store_app.erl
new file mode 100644 (file)
index 0000000..1a2881f
--- /dev/null
@@ -0,0 +1,144 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_trust_store_app).
+-behaviour(application).
+-export([change_SSL_options/0]).
+-export([revert_SSL_options/0]).
+-export([start/2, stop/1]).
+-define(DIRECTORY_OR_FILE_NAME_EXISTS, eexist).
+
+
+-rabbit_boot_step({rabbit_trust_store, [
+    {description, "Change necessary SSL options."},
+    {mfa, {?MODULE, change_SSL_options, []}},
+    {cleanup, {?MODULE, revert_SSL_options, []}},
+    %% {requires, ...},
+    {enables, networking}]}).
+
+change_SSL_options() ->
+    After = case application:get_env(rabbit, ssl_options) of
+        undefined ->
+            Before = [],
+            edit(Before);
+        {ok, Before} when is_list(Before) ->
+            ok = application:set_env(rabbit, initial_SSL_options, Before),
+            edit(Before)
+    end,
+    ok = application:set_env(rabbit,
+        ssl_options, After).
+
+revert_SSL_options() ->
+    {ok, Cfg} = application:get_env(rabbit, initial_SSL_options),
+    ok = application:set_env(rabbit, ssl_options, Cfg).
+
+start(normal, _) ->
+
+    %% The below two are properties, that is, tuple of name/value.
+    Path = whitelist_path(),
+    Interval = refresh_interval_time(),
+
+    rabbit_trust_store_sup:start_link([Path, Interval]).
+
+stop(_) ->
+    ok.
+
+
+%% Ancillary & Constants
+
+edit(Options) ->
+    case proplists:get_value(verify_fun, Options) of
+        undefined ->
+            ok;
+        Val       ->
+            rabbit_log:warning("RabbitMQ trust store plugin is used "
+                               "and the verify_fun TLS option is set: ~p. "
+                               "It will be overwritten by the plugin.~n", [Val]),
+            ok
+    end,
+    %% Only enter those options necessary for this application.
+    lists:keymerge(1, required_options(),
+        [{verify_fun, {delegate(), continue}},
+         {partial_chain, fun partial_chain/1} | Options]).
+
+delegate() -> fun rabbit_trust_store:whitelisted/3.
+
+partial_chain(Chain) ->
+    % special handling of clients that present a chain rather than just a peer cert.
+    case lists:reverse(Chain) of
+        [PeerDer, Ca | _] ->
+            Peer = public_key:pkix_decode_cert(PeerDer, otp),
+            % If the Peer is whitelisted, make its immediate Authority a trusted one.
+            % This means the peer will automatically be validated.
+            case rabbit_trust_store:is_whitelisted(Peer) of
+                true -> {trusted_ca, Ca};
+                false -> unknown_ca
+            end;
+        _ -> unknown_ca
+    end.
+
+required_options() ->
+    [{verify, verify_peer}, {fail_if_no_peer_cert, true}].
+
+whitelist_path() ->
+    Path = case application:get_env(rabbitmq_trust_store, directory) of
+        undefined ->
+            default_directory();
+        {ok, V} when is_binary(V) ->
+            binary_to_list(V);
+        {ok, V} when is_list(V) ->
+            V
+    end,
+    ok = ensure_directory(Path),
+    {directory, Path}.
+
+refresh_interval_time() ->
+    case application:get_env(rabbitmq_trust_store, refresh_interval) of
+        undefined ->
+            {refresh_interval, default_refresh_interval()};
+        {ok, S} when is_integer(S), S >= 0 ->
+            {refresh_interval, S};
+        {ok, {seconds, S}} when is_integer(S), S >= 0 ->
+            {refresh_interval, S}
+    end.
+
+default_directory() ->
+
+    %% Dismantle the directory tree: first the table & meta-data
+    %% directory, then the Mnesia database directory, finally the node
+    %% directory where we will place the default whitelist in `Full`.
+
+    Table  = filename:split(rabbit_mnesia:dir()),
+    Mnesia = lists:droplast(Table),
+    Node   = lists:droplast(Mnesia),
+    Full = Node ++ ["trust_store", "whitelist"],
+    filename:join(Full).
+
+default_refresh_interval() ->
+    {ok, I} = application:get_env(rabbitmq_trust_store, default_refresh_interval),
+    I.
+
+ensure_directory(Path) ->
+    ok = ensure_parent_directories(Path),
+    case file:make_dir(Path) of
+        {error, ?DIRECTORY_OR_FILE_NAME_EXISTS} ->
+            true = filelib:is_dir(Path),
+            ok;
+        ok ->
+            ok
+    end.
+
+ensure_parent_directories(Path) ->
+    filelib:ensure_dir(Path).
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl b/rabbitmq-server/deps/rabbitmq_trust_store/src/rabbit_trust_store_sup.erl
new file mode 100644 (file)
index 0000000..5e2562d
--- /dev/null
@@ -0,0 +1,37 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_trust_store_sup).
+-behaviour(supervisor).
+-export([start_link/1]).
+-export([init/1]).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+
+%% ...
+
+start_link(Settings) ->
+
+    supervisor:start_link({local, ?MODULE}, ?MODULE, Settings).
+
+
+%% ...
+
+init(Settings) ->
+    {ok,
+     {{one_for_one, 1, 5},
+      [{trust_store, {rabbit_trust_store, start_link, [Settings]},
+        permanent, timer:seconds(5), worker, [rabbit_trust_store]}]}}.
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/src/rabbitmq_trust_store.app.src b/rabbitmq-server/deps/rabbitmq_trust_store/src/rabbitmq_trust_store.app.src
new file mode 100644 (file)
index 0000000..4b1775e
--- /dev/null
@@ -0,0 +1,15 @@
+{application, rabbitmq_trust_store, [
+  {description, "Client certificate trust store. Provides a way to whitelist client x509 certificates."},
+  {vsn, "3.6.5"},
+  {modules, []},
+  {registered, []},
+  {mod, {rabbit_trust_store_app, []}},
+  {env, [
+         {default_refresh_interval, 30}
+        ]},
+  {applications, [
+    kernel,
+    stdlib,
+    rabbit
+  ]}
+]}.
diff --git a/rabbitmq-server/deps/rabbitmq_trust_store/test/system_SUITE.erl b/rabbitmq-server/deps/rabbitmq_trust_store/test/system_SUITE.erl
new file mode 100644 (file)
index 0000000..c9a51b3
--- /dev/null
@@ -0,0 +1,679 @@
+-module(system_SUITE).
+-compile([export_all]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-define(SERVER_REJECT_CLIENT, {tls_alert, "unknown ca"}).
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                 library,
+                                 invasive_SSL_option_change,
+                                 validation_success_for_AMQP_client,
+                                 validation_failure_for_AMQP_client,
+                                 validate_chain,
+                                 validate_longer_chain,
+                                 validate_chain_without_whitelisted,
+                                 whitelisted_certificate_accepted_from_AMQP_client_regardless_of_validation_to_root,
+                                 removed_certificate_denied_from_AMQP_client,
+                                 installed_certificate_accepted_from_AMQP_client,
+                                 whitelist_directory_DELTA,
+                                 replaced_whitelisted_certificate_should_be_accepted,
+                                 ensure_configuration_using_binary_strings_is_handled,
+                                 ignore_corrupt_cert,
+                                 ignore_same_cert_with_different_name,
+                                 list
+                               ]}
+    ].
+
+suite() ->
+    [{timetrap, {seconds, 60}}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE},
+        {rmq_extra_tcp_ports, [tcp_port_amqp_tls_extra]}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    TestCaseDir = rabbit_ct_helpers:config_to_testcase_name(Config, Testcase),
+    WhitelistDir = filename:join([?config(rmq_certsdir, Config), "trust_store", TestCaseDir]),
+    ok = filelib:ensure_dir(WhitelistDir),
+    ok = file:make_dir(WhitelistDir),
+    Config1 = rabbit_ct_helpers:set_config(Config, {whitelist_dir, WhitelistDir}),
+    rabbit_ct_helpers:testcase_started(Config1, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+
+%% -------------------------------------------------------------------
+%% Testsuite cases
+%% -------------------------------------------------------------------
+
+library(_) ->
+     %% Given: Makefile.
+     {_Root, _Certificate, _Key} = ct_helper:make_certs(),
+     ok.
+
+invasive_SSL_option_change(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+          ?MODULE, invasive_SSL_option_change1, []).
+
+invasive_SSL_option_change1() ->
+    %% Given: Rabbit is started with the boot-steps in the
+    %% Trust-Store's OTP Application file.
+
+    %% When: we get Rabbit's SSL options.
+    {ok, Options} = application:get_env(rabbit, ssl_options),
+
+    %% Then: all necessary settings are correct.
+    verify_peer             = proplists:get_value(verify, Options),
+    true                    = proplists:get_value(fail_if_no_peer_cert, Options),
+    {Verifyfun, _UserState} = proplists:get_value(verify_fun, Options),
+
+    {module, rabbit_trust_store} = erlang:fun_info(Verifyfun, module),
+    {name,   whitelisted}        = erlang:fun_info(Verifyfun, name),
+    ok.
+
+validation_success_for_AMQP_client(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, validation_success_for_AMQP_client1, [Config]).
+
+validation_success_for_AMQP_client1(Config) ->
+    AuthorityInfo = {Root, _AuthorityKey} = erl_make_certs:make_cert([{key, dsa}]),
+    {Certificate, Key} = chain(AuthorityInfo),
+    {Certificate2, Key2} = chain(AuthorityInfo),
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+    %% When: Rabbit accepts just this one authority's certificate
+    %% (i.e. these are options that'd be in the configuration
+    %% file).
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                     {cert, Certificate2},
+                                                     {key, Key2} | cfg()], 1),
+
+    %% Then: a client presenting a certificate rooted at the same
+    %% authority connects successfully.
+    {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                           port = Port,
+                                                           ssl_options = [{cert, Certificate},
+                                                                          {key, Key}]}),
+
+    %% Clean: client & server TLS/TCP.
+    ok = amqp_connection:close(Con),
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+
+validation_failure_for_AMQP_client(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, validation_failure_for_AMQP_client1, [Config]).
+
+validation_failure_for_AMQP_client1(Config) ->
+    %% Given: a root certificate and a certificate rooted with another
+    %% authority.
+    {Root, Cert, Key}      = ct_helper:make_certs(),
+    {_,  CertOther, KeyOther}    = ct_helper:make_certs(),
+
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+    %% When: Rabbit accepts certificates rooted with just one
+    %% particular authority.
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                     {cert, Cert},
+                                                     {key, Key} | cfg()], 1),
+
+    %% Then: a client presenting a certificate rooted with another
+    %% authority is REJECTED.
+    {error, ?SERVER_REJECT_CLIENT} =
+     amqp_connection:start(#amqp_params_network{host = Host,
+                                                port = Port,
+                                                ssl_options = [{cert, CertOther},
+                                                               {key, KeyOther}]}),
+
+    %% Clean: server TLS/TCP.
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+validate_chain(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, validate_chain1, [Config]).
+
+validate_chain1(Config) ->
+    %% Given: a whitelisted certificate `CertTrusted` AND a CA `RootTrusted`
+    {Root, Cert, Key} = ct_helper:make_certs(),
+    {RootTrusted,  CertTrusted, KeyTrusted} = ct_helper:make_certs(),
+
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+    ok = whitelist(Config, "alice", CertTrusted,  KeyTrusted),
+    ok = change_configuration(rabbitmq_trust_store, [{directory, whitelist_dir(Config)}]),
+
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                     {cert, Cert},
+                                                     {key, Key} | cfg()], 1),
+
+    %% When: a client connects and present `RootTrusted` as well as the `CertTrusted`
+    %% Then: the connection is successful.
+    {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                           port = Port,
+                                                           ssl_options = [{cacerts, [RootTrusted]},
+                                                                          {cert, CertTrusted},
+                                                                          {key, KeyTrusted}]}),
+
+    %% Clean: client & server TLS/TCP
+    ok = amqp_connection:close(Con),
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+validate_longer_chain(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, validate_longer_chain1, [Config]).
+
+validate_longer_chain1(Config) ->
+
+    {Root, Cert, Key} = ct_helper:make_certs(),
+
+    %% Given: a whitelisted certificate `CertTrusted`
+    %% AND a certificate `CertUntrusted` that is not whitelisted with the same root as `CertTrusted`
+    %% AND `CertInter` intermediate CA
+    %% AND `RootTrusted` CA
+    AuthorityInfo = {RootCA, _AuthorityKey} = erl_make_certs:make_cert([{key, dsa}]),
+    Inter = {CertInter, {KindInter, KeyDataInter, _}} = erl_make_certs:make_cert([{key, dsa}, {issuer, AuthorityInfo}]),
+    KeyInter = {KindInter, KeyDataInter},
+    {CertUntrusted, {KindUntrusted, KeyDataUntrusted, _}} = erl_make_certs:make_cert([{key, dsa}, {issuer, Inter}]),
+    KeyUntrusted = {KindUntrusted, KeyDataUntrusted},
+    {CertTrusted, {Kind, KeyData, _}} = erl_make_certs:make_cert([{key, dsa}, {issuer, Inter}]),
+    KeyTrusted = {Kind, KeyData},
+
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+    ok = whitelist(Config, "alice", CertTrusted,  KeyTrusted),
+    ok = change_configuration(rabbitmq_trust_store, [{directory, whitelist_dir(Config)}]),
+
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                     {cert, Cert},
+                                                     {key, Key} | cfg()], 1),
+
+    %% When: a client connects and presents `CertInter` as well as the `CertTrusted`
+    %% Then: the connection is successful.
+    {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                           port = Port,
+                                                           ssl_options = [{cacerts, [CertInter]},
+                                                                          {cert, CertTrusted},
+                                                                          {key, KeyTrusted}]}),
+
+    %% When: a client connects and presents `RootTrusted` and `CertInter` as well as the `CertTrusted`
+    %% Then: the connection is successful.
+    {ok, Con2} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                            port = Port,
+                                                            ssl_options = [{cacerts, [RootCA, CertInter]},
+                                                                           {cert, CertTrusted},
+                                                                           {key, KeyTrusted}]}),
+
+    %% When: a client connects and presents `CertInter` and `RootCA` as well as the `CertTrusted`
+    %% Then: the connection is successful.
+    {ok, Con3} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                            port = Port,
+                                                            ssl_options = [{cacerts, [CertInter, RootCA]},
+                                                                           {cert, CertTrusted},
+                                                                           {key, KeyTrusted}]}),
+
+    % %% When: a client connects and presents `CertInter` and `RootCA` but NOT `CertTrusted`
+    % %% Then: the connection is not successful
+    {error, ?SERVER_REJECT_CLIENT} =
+        amqp_connection:start(#amqp_params_network{host = Host,
+                                                   port = Port,
+                                                   ssl_options = [{cacerts, [RootCA]},
+                                                                  {cert, CertInter},
+                                                                  {key, KeyInter}]}),
+
+    %% When: a client connects and presents `CertUntrusted` and `RootCA` and `CertInter`
+    %% Then: the connection is not successful
+    %% TODO: for some reason this returns `bad certificate` rather than `unknown ca`
+    {error, {tls_alert, "bad certificate"}} =
+        amqp_connection:start(#amqp_params_network{host = Host,
+                                                   port = Port,
+                                                   ssl_options = [{cacerts, [RootCA, CertInter]},
+                                                                  {cert, CertUntrusted},
+                                                                  {key, KeyUntrusted}]}),
+    %% Clean: client & server TLS/TCP
+    ok = amqp_connection:close(Con),
+    ok = amqp_connection:close(Con2),
+    ok = amqp_connection:close(Con3),
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+validate_chain_without_whitelisted(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, validate_chain_without_whitelisted1, [Config]).
+
+validate_chain_without_whitelisted1(Config) ->
+    %% Given: a certificate `CertUntrusted` that is NOT whitelisted.
+    {Root, Cert, Key} = ct_helper:make_certs(),
+    {RootUntrusted,  CertUntrusted, KeyUntrusted} = ct_helper:make_certs(),
+
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+    ok = change_configuration(rabbitmq_trust_store, [{directory, whitelist_dir(Config)}]),
+
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                     {cert, Cert},
+                                                     {key, Key} | cfg()], 1),
+
+    %% When: Rabbit validates paths
+    %% Then: a client presenting the non-whitelisted certificate `CertUntrusted` and `RootUntrusted`
+    %% is rejected.
+    {error, ?SERVER_REJECT_CLIENT} =
+        amqp_connection:start(#amqp_params_network{host = Host,
+                                                   port = Port,
+                                                   ssl_options = [{cacerts, [RootUntrusted]},
+                                                                  {cert, CertUntrusted},
+                                                                  {key, KeyUntrusted}]}),
+
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+whitelisted_certificate_accepted_from_AMQP_client_regardless_of_validation_to_root(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, whitelisted_certificate_accepted_from_AMQP_client_regardless_of_validation_to_root1, [Config]).
+
+whitelisted_certificate_accepted_from_AMQP_client_regardless_of_validation_to_root1(Config) ->
+    %% Given: a certificate `CertTrusted` AND that it is whitelisted.
+    {Root, Cert, Key} = ct_helper:make_certs(),
+    {_,  CertTrusted, KeyTrusted} = ct_helper:make_certs(),
+
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+    ok = whitelist(Config, "alice", CertTrusted,  KeyTrusted),
+    ok = change_configuration(rabbitmq_trust_store, [{directory, whitelist_dir(Config)}]),
+
+    %% When: Rabbit validates paths with a different root `R` than
+    %% that of the certificate `CertTrusted`.
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                      {cert, Cert},
+                                                      {key, Key} | cfg()], 1),
+
+    %% Then: a client presenting the whitelisted certificate `C`
+    %% is allowed.
+    {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                          port = Port,
+                                                          ssl_options = [{cert, CertTrusted},
+                                                                         {key, KeyTrusted}]}),
+
+    %% Clean: client & server TLS/TCP
+    ok = amqp_connection:close(Con),
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+
+removed_certificate_denied_from_AMQP_client(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, removed_certificate_denied_from_AMQP_client1, [Config]).
+
+removed_certificate_denied_from_AMQP_client1(Config) ->
+    %% Given: a certificate `CertOther` AND that it is whitelisted.
+    {Root, Cert, Key} = ct_helper:make_certs(),
+    {_,  CertOther, KeyOther} = ct_helper:make_certs(),
+
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+    ok = whitelist(Config, "bob", CertOther,  KeyOther),
+    ok = change_configuration(rabbitmq_trust_store, [{directory, whitelist_dir(Config)},
+                                                     {refresh_interval,
+                                                        {seconds, interval()}}]),
+
+    %% When: we wait for at least one second (the accuracy of the
+    %% file system's time), remove the whitelisted certificate,
+    %% then wait for the trust-store to refresh the whitelist.
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                      {cert, Cert},
+                                                      {key, Key} | cfg()], 1),
+
+    wait_for_file_system_time(),
+    ok = delete("bob.pem", Config),
+    wait_for_trust_store_refresh(),
+
+    %% Then: a client presenting the removed whitelisted
+    %% certificate `CertOther` is denied.
+    {error, ?SERVER_REJECT_CLIENT} =
+       amqp_connection:start(#amqp_params_network{host = Host,
+                                                  port = Port,
+                                                  ssl_options = [{cert, CertOther},
+                                                                 {key, KeyOther}]}),
+
+    %% Clean: server TLS/TCP
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+
+installed_certificate_accepted_from_AMQP_client(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, installed_certificate_accepted_from_AMQP_client1, [Config]).
+
+installed_certificate_accepted_from_AMQP_client1(Config) ->
+    %% Given: a certificate `CertOther` which is NOT yet whitelisted.
+    {Root, Cert, Key} = ct_helper:make_certs(),
+    {_,  CertOther, KeyOther} = ct_helper:make_certs(),
+
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+    ok = change_configuration(rabbitmq_trust_store, [{directory, whitelist_dir(Config)},
+                                                    {refresh_interval,
+                                                        {seconds, interval()}}]),
+
+    %% When: we wait for at least one second (the accuracy of the
+    %% file system's time), add a certificate to the directory,
+    %% then wait for the trust-store to refresh the whitelist.
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                      {cert, Cert},
+                                                      {key, Key} | cfg()], 1),
+
+    wait_for_file_system_time(),
+    ok = whitelist(Config, "charlie", CertOther,  KeyOther),
+    wait_for_trust_store_refresh(),
+
+    %% Then: a client presenting the whitelisted certificate `CertOther`
+    %% is allowed.
+    {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                          port = Port,
+                                                          ssl_options = [{cert, CertOther},
+                                                                         {key, KeyOther}]}),
+
+    %% Clean: Client & server TLS/TCP
+    ok = amqp_connection:close(Con),
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+
+whitelist_directory_DELTA(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, whitelist_directory_DELTA1, [Config]).
+
+whitelist_directory_DELTA1(Config) ->
+    %% Given: a certificate `Root` which Rabbit can use as a
+    %% root certificate to validate against AND three
+    %% certificates which clients can present (the first two
+    %% of which are whitelisted).
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+    {Root, Cert, Key} = ct_helper:make_certs(),
+
+    {_,  CertListed1, KeyListed1} = ct_helper:make_certs(),
+    {_,  CertRevoked, KeyRevoked} = ct_helper:make_certs(),
+    {_,  CertListed2, KeyListed2} = ct_helper:make_certs(),
+
+    ok = whitelist(Config, "foo", CertListed1,  KeyListed1),
+    ok = whitelist(Config, "bar", CertRevoked,  KeyRevoked),
+    ok = change_configuration(rabbitmq_trust_store, [{directory, whitelist_dir(Config)},
+                                                    {refresh_interval,
+                                                     {seconds, interval()}}]),
+
+    %% When: we wait for at least one second (the accuracy
+    %% of the file system's time), delete a certificate and
+    %% add a certificate to the directory, then wait for the
+    %% trust-store to refresh the whitelist.
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                      {cert, Cert},
+                                                      {key, Key} | cfg()], 1),
+
+    wait_for_file_system_time(),
+    ok = delete("bar.pem", Config),
+    ok = whitelist(Config, "baz", CertListed2,  KeyListed2),
+    wait_for_trust_store_refresh(),
+
+    %% Then: connectivity to Rabbit is as it should be.
+    {ok, Conn1} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                            port = Port,
+                                                            ssl_options = [{cert, CertListed1},
+                                                                           {key, KeyListed1}]}),
+    {error, ?SERVER_REJECT_CLIENT} =
+        amqp_connection:start(#amqp_params_network{host = Host,
+                                                   port = Port,
+                                                   ssl_options = [{cert, CertRevoked},
+                                                                  {key, KeyRevoked}]}),
+
+    {ok, Conn2} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                            port = Port,
+                                                            ssl_options = [{cert, CertListed2},
+                                                                           {key, KeyListed2}]}),
+
+    %% Clean: delete certificate file, close client & server
+    %% TLS/TCP
+    ok = amqp_connection:close(Conn1),
+    ok = amqp_connection:close(Conn2),
+
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+replaced_whitelisted_certificate_should_be_accepted(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, replaced_whitelisted_certificate_should_be_accepted1, [Config]).
+
+replaced_whitelisted_certificate_should_be_accepted1(Config) ->
+    %% Given: a root certificate and a 2 other certificates
+    {Root, Cert, Key}      = ct_helper:make_certs(),
+    {_,  CertFirst, KeyFirst}    = ct_helper:make_certs(),
+    {_,  CertUpdated, KeyUpdated}    = ct_helper:make_certs(),
+
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                    {cert, Cert},
+                                                    {key, Key} | cfg()], 1),
+    %% And: the first certificate has been whitelisted
+    ok = whitelist(Config, "bart", CertFirst,  KeyFirst),
+    ok = change_configuration(rabbitmq_trust_store, [{directory, whitelist_dir(Config)},
+                                                  {refresh_interval, {seconds, interval()}}]),
+
+    wait_for_trust_store_refresh(),
+
+    %% verify that the first cert can be used to connect
+    {ok, Con} =
+     amqp_connection:start(#amqp_params_network{host = Host,
+                                                port = Port,
+                                                ssl_options = [{cert, CertFirst},
+                                                               {key, KeyFirst} ]}),
+    %% verify the other certificate is not accepted
+    {error, ?SERVER_REJECT_CLIENT} =
+     amqp_connection:start(#amqp_params_network{host = Host,
+                                                port = Port,
+                                                ssl_options = [{cert, CertUpdated},
+                                                               {key, KeyUpdated} ]}),
+    ok = amqp_connection:close(Con),
+
+    %% When: a whitelisted certificate is replaced with one with the same name
+    ok = whitelist(Config, "bart", CertUpdated,  KeyUpdated),
+
+    wait_for_trust_store_refresh(),
+
+    %% Then: the first certificate should be rejected
+    {error, ?SERVER_REJECT_CLIENT} =
+     amqp_connection:start(#amqp_params_network{host = Host,
+                                                port = Port,
+                                                ssl_options = [{cert, CertFirst},
+                                                               %% disable ssl session caching
+                                                               %% as this ensures the cert
+                                                               %% will be re-verified by the
+                                                               %% server
+                                                               {reuse_sessions, false},
+                                                               {key, KeyFirst} ]}),
+
+    %% And: the updated certificate should allow the user to connect
+    {ok, Con2} =
+     amqp_connection:start(#amqp_params_network{host = Host,
+                                                port = Port,
+                                                ssl_options = [{cert, CertUpdated},
+                                                               {reuse_sessions, false},
+                                                               {key, KeyUpdated} ]}),
+    ok = amqp_connection:close(Con2),
+    %% Clean: server TLS/TCP.
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+
+ensure_configuration_using_binary_strings_is_handled(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, ensure_configuration_using_binary_strings_is_handled1, [Config]).
+
+ensure_configuration_using_binary_strings_is_handled1(Config) ->
+    ok = change_configuration(rabbitmq_trust_store, [{directory, list_to_binary(whitelist_dir(Config))},
+                                                    {refresh_interval,
+                                                        {seconds, interval()}}]).
+
+ignore_corrupt_cert(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, ignore_corrupt_cert1, [Config]).
+
+ignore_corrupt_cert1(Config) ->
+    %% Given: a certificate `CertTrusted` AND that it is whitelisted.
+    %% Given: a corrupt certificate.
+
+    Port = port(Config),
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+    {Root, Cert, Key} = ct_helper:make_certs(),
+    {_,  CertTrusted, KeyTrusted} = ct_helper:make_certs(),
+
+    WhitelistDir = whitelist_dir(Config),
+    ok = change_configuration(rabbitmq_trust_store, [{directory, WhitelistDir}]),
+    ok = whitelist(Config, "alice", CertTrusted,  KeyTrusted),
+
+    %% When: Rabbit tries to whitelist the corrupt certificate.
+    ok = whitelist(Config, "corrupt", <<48>>,  KeyTrusted),
+    ok = change_configuration(rabbitmq_trust_store, [{directory, WhitelistDir}]),
+
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                      {cert, Cert},
+                                                      {key, Key} | cfg()], 1),
+
+    %% Then: the trust store should keep functioning
+    %% And: a client presenting the whitelisted certificate `CertTrusted`
+    %% is allowed.
+    {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                           port = Port,
+                                                           ssl_options = [{cert, CertTrusted},
+                                                                          {key, KeyTrusted}]}),
+
+    %% Clean: client & server TLS/TCP
+    ok = amqp_connection:close(Con),
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+ignore_same_cert_with_different_name(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE, ignore_same_cert_with_different_name1, [Config]).
+
+ignore_same_cert_with_different_name1(Config) ->
+    %% Given: a certificate `CertTrusted` AND that it is whitelisted.
+    %% Given: the same certificate saved with a different filename.
+
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+    Port = port(Config),
+    {Root, Cert, Key} = ct_helper:make_certs(),
+    {_,  CertTrusted, KeyTrusted} = ct_helper:make_certs(),
+
+    WhitelistDir = whitelist_dir(Config),
+
+    ok = change_configuration(rabbitmq_trust_store, [{directory, WhitelistDir}]),
+    ok = whitelist(Config, "alice", CertTrusted,  KeyTrusted),
+    %% When: Rabbit tries to insert the duplicate certificate
+    ok = whitelist(Config, "malice", CertTrusted,  KeyTrusted),
+    ok = change_configuration(rabbitmq_trust_store, [{directory, WhitelistDir}]),
+
+    ok = rabbit_networking:start_ssl_listener(Port, [{cacerts, [Root]},
+                                                      {cert, Cert},
+                                                      {key, Key} | cfg()], 1),
+
+    %% Then: the trust store should keep functioning.
+    %% And: a client presenting the whitelisted certificate `CertTrusted`
+    %% is allowed.
+    {ok, Con} = amqp_connection:start(#amqp_params_network{host = Host,
+                                                           port = Port,
+                                                           ssl_options = [{cert, CertTrusted},
+                                                                          {key, KeyTrusted}]}),
+
+    %% Clean: client & server TLS/TCP
+    ok = amqp_connection:close(Con),
+    ok = rabbit_networking:stop_tcp_listener(Port).
+
+list(Config) ->
+    {_Root,  Cert, Key}    = ct_helper:make_certs(),
+    ok = whitelist(Config, "alice", Cert,  Key),
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+           ?MODULE,  change_configuration, [rabbitmq_trust_store, [{directory, whitelist_dir(Config)}]]),
+    Certs = rabbit_ct_broker_helpers:rpc(Config, 0,
+           rabbit_trust_store, list, []),
+    % only really tests it isn't totally broken.
+    {match, _} = re:run(Certs, ".*alice\.pem.*").
+
+%% Test Constants
+
+port(Config) ->
+    rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls_extra).
+
+whitelist_dir(Config) ->
+    ?config(whitelist_dir, Config).
+
+interval() ->
+    1.
+
+wait_for_file_system_time() ->
+    timer:sleep(timer:seconds(1)).
+
+wait_for_trust_store_refresh() ->
+    timer:sleep(2 * timer:seconds(interval())).
+
+cfg() ->
+    {ok, Cfg} = application:get_env(rabbit, ssl_options),
+    Cfg.
+
+%% Ancillary
+
+chain(Issuer) ->
+    %% These are DER encoded.
+    {Certificate, {Kind, Key, _}} = erl_make_certs:make_cert([{key, dsa}, {issuer, Issuer}]),
+    {Certificate, {Kind, Key}}.
+
+change_configuration(App, Props) ->
+    ok = application:stop(App),
+    ok = change_cfg(App, Props),
+    application:start(App).
+
+change_cfg(_, []) ->
+    ok;
+change_cfg(App, [{Name,Value}|Rest]) ->
+    ok = application:set_env(App, Name, Value),
+    change_cfg(App, Rest).
+
+whitelist(Config, Filename, Certificate, {A, B} = _Key) ->
+    Path = whitelist_dir(Config),
+    ok = erl_make_certs:write_pem(Path, Filename, {Certificate, {A, B, not_encrypted}}),
+    [file:delete(filename:join(Path, K)) || K <- filelib:wildcard("*_key.pem", Path)],
+    ok.
+
+delete(Name, Config) ->
+    F = filename:join([whitelist_dir(Config), Name]),
+    file:delete(F).
diff --git a/rabbitmq-server/deps/rabbitmq_web_dispatch/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_web_dispatch/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index dba888e5ce98b0aac1f5e0d6610260397082c035..39321893c38a9fe77eac372860d6ff62dededd09 100644 (file)
@@ -1,7 +1,7 @@
 PROJECT = rabbitmq_web_dispatch
 
 DEPS = mochiweb webmachine
-TESTS_DEPS = amqp_client
+TEST_DEPS = rabbit amqp_client
 
 DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk
 
@@ -13,6 +13,3 @@ ERLANG_MK_COMMIT = rabbitmq-tmp
 
 include rabbitmq-components.mk
 include erlang.mk
-
-WITH_BROKER_TEST_COMMANDS := rabbit_web_dispatch_test:test()
-STANDALONE_TEST_COMMANDS := rabbit_web_dispatch_test_unit:test()
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 5d1028fc1e894d460605b5356ba0ea8e0856dcab..3d11529bf0adfecce3aa315975131f5bdbbbb3e8 100644 (file)
 
 render_error(Code, Req, Reason) ->
     case Req:has_response_body() of
-        {true, _}  -> maybe_log(Req, Reason),
-                      Req:response_body();
+        {true, _}  ->
+            maybe_log(Req, Reason),
+            {Body, ReqState0} = Req:response_body(),
+            {ok, ReqState} =
+                webmachine_request:remove_response_header("Content-Encoding",
+                                                          ReqState0),
+            {Body, ReqState};
         {false, _} -> render_error_body(Code, Req:trim_state(), Reason)
     end.
 
@@ -37,7 +42,8 @@ render_error_body(404,  Req, _)      -> error_body(404,  Req, "Not Found");
 render_error_body(Code, Req, Reason) -> error_body(Code, Req, Reason).
 
 error_body(Code, Req, Reason) ->
-    {ok, ReqState} = Req:add_response_header("Content-Type","application/json"),
+    {ok, _ReqState0} = Req:add_response_header("Content-Type","application/json"),
+    {ok, ReqState} = Req:remove_response_header("Content-Encoding"),
     case Code of
         500 -> maybe_log(Req, Reason);
         _   -> ok
index f1beb4b42f742a82d348543fa9d77e7708f13f19..cd521f3bc71c18bc1f3acb42b25e116e11a3086e 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_web_dispatch,
  [{description, "RabbitMQ Web Dispatcher"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_web_dispatch_app, []}},
diff --git a/rabbitmq-server/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl b/rabbitmq-server/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_SUITE.erl
new file mode 100644 (file)
index 0000000..22acd3e
--- /dev/null
@@ -0,0 +1,103 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_web_dispatch_SUITE).
+
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+                                query_static_resource_test,
+                                add_idempotence_test
+                               ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    inets:start(),
+    rabbit_ct_helpers:log_environment(),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, ?MODULE},
+        {rmq_extra_tcp_ports, [tcp_port_http_extra]}
+      ]),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    Config.
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(_Testcase, Config) ->
+    Config.
+
+end_per_testcase(_Testcase, Config) ->
+    Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+query_static_resource_test(Config) ->
+    Host = rabbit_ct_helpers:get_config(Config, rmq_hostname),
+    Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_http_extra),
+    rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, query_static_resource_test1, [Config, Host, Port]).
+query_static_resource_test1(_Config, Host, Port) ->
+    %% TODO this is a fairly rubbish test, but not as bad as it was
+    rabbit_web_dispatch:register_static_context(test, [{port, Port}],
+                                                "rabbit_web_dispatch_test",
+                                                ?MODULE, "test/priv/www", "Test"),
+    {ok, {_Status, _Headers, Body}} =
+        httpc:request(format("http://~s:~w/rabbit_web_dispatch_test/index.html", [Host, Port])),
+    ?assert(string:str(Body, "RabbitMQ HTTP Server Test Page") /= 0),
+
+    passed.
+
+add_idempotence_test(Config) ->
+    Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_http_extra),
+    rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, add_idempotence_test1, [Port]).
+add_idempotence_test1(Port) ->
+    F = fun(_Req) -> ok end,
+    L = {"/foo", "Foo"},
+    rabbit_web_dispatch_registry:add(foo, [{port, Port}], F, F, L),
+    rabbit_web_dispatch_registry:add(foo, [{port, Port}], F, F, L),
+    ?assertEqual(
+       1, length([ok || {"/foo", _, _} <-
+                            rabbit_web_dispatch_registry:list_all()])),
+    passed.
+
+
+
+format(Fmt, Val) ->
+    lists:flatten(io_lib:format(Fmt, Val)).
similarity index 54%
rename from rabbitmq-server/deps/rabbitmq_web_dispatch/test/src/rabbit_web_dispatch_test_unit.erl
rename to rabbitmq-server/deps/rabbitmq_web_dispatch/test/rabbit_web_dispatch_unit_SUITE.erl
index 1939a8a6fe3b0ea8ce3735392fd4047ba8914906..3adcc9d386c3932b95097ca15c8dacda8a88e4e9 100644 (file)
 %% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_web_dispatch_test_unit).
+-module(rabbit_web_dispatch_unit_SUITE).
 
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 
-relativise_test() ->
+all() ->
+    [
+      {group, parallel_tests}
+    ].
+
+groups() ->
+    [
+      {parallel_tests, [parallel], [
+                                    relativise_test,
+                                    unrelativise_test
+                                   ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Test suite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    Config.
+
+end_per_suite(Config) ->
+    Config.
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(_Testcase, Config) ->
+    Config.
+
+end_per_testcase(_Testcase, Config) ->
+    Config.
+
+%% -------------------------------------------------------------------
+%% Test cases.
+%% -------------------------------------------------------------------
+
+relativise_test(_Config) ->
     Rel = fun rabbit_web_dispatch_util:relativise/2,
     ?assertEqual("baz",        Rel("/foo/bar/bash", "/foo/bar/baz")),
     ?assertEqual("../bax/baz", Rel("/foo/bar/bash", "/foo/bax/baz")),
@@ -26,11 +68,15 @@ relativise_test() ->
     ?assertEqual("..",         Rel("/foo/bar/bash", "/foo/bar")),
     ?assertEqual("../..",      Rel("/foo/bar/bash", "/foo")),
     ?assertEqual("bar/baz",    Rel("/foo/bar",      "/foo/bar/baz")),
-    ?assertEqual("foo",        Rel("/",             "/foo")).
+    ?assertEqual("foo",        Rel("/",             "/foo")),
+
+    passed.
 
-unrelativise_test() ->
+unrelativise_test(_Config) ->
     Un = fun rabbit_web_dispatch_util:unrelativise/2,
     ?assertEqual("/foo/bar", Un("/foo/foo", "bar")),
     ?assertEqual("/foo/bar", Un("/foo/foo", "./bar")),
     ?assertEqual("bar",      Un("foo", "bar")),
-    ?assertEqual("/baz/bar", Un("/foo/foo", "../baz/bar")).
+    ?assertEqual("/baz/bar", Un("/foo/foo", "../baz/bar")),
+
+    passed.
diff --git a/rabbitmq-server/deps/rabbitmq_web_dispatch/test/src/rabbit_web_dispatch_test.erl b/rabbitmq-server/deps/rabbitmq_web_dispatch/test/src/rabbit_web_dispatch_test.erl
deleted file mode 100644 (file)
index ce0ad61..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_web_dispatch_test).
-
--include_lib("eunit/include/eunit.hrl").
-
-query_static_resource_test() ->
-    %% TODO this is a fairly rubbish test, but not as bad as it was
-    rabbit_web_dispatch:register_static_context(test, [{port, 12345}],
-                                                "rabbit_web_dispatch_test",
-                                                ?MODULE, "test/priv/www", "Test"),
-    {ok, {_Status, _Headers, Body}} =
-        httpc:request("http://localhost:12345/rabbit_web_dispatch_test/index.html"),
-    ?assert(string:str(Body, "RabbitMQ HTTP Server Test Page") /= 0).
-
-add_idempotence_test() ->
-    F = fun(_Req) -> ok end,
-    L = {"/foo", "Foo"},
-    rabbit_web_dispatch_registry:add(foo, [{port, 12345}], F, F, L),
-    rabbit_web_dispatch_registry:add(foo, [{port, 12345}], F, F, L),
-    ?assertEqual(
-       1, length([ok || {"/foo", _, _} <-
-                            rabbit_web_dispatch_registry:list_all()])),
-    passed.
diff --git a/rabbitmq-server/deps/rabbitmq_web_stomp/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_web_stomp/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index 96c0caa2dd369ee9820386836af6fc81e2d5511a..9e53c88bbb35fa8a5cc9feb2adbbbcc53fa3286a 100644 (file)
@@ -1,6 +1,7 @@
 PROJECT = rabbitmq_web_stomp
 
 DEPS = cowboy sockjs rabbitmq_stomp
+TEST_DEPS := $(filter-out rabbitmq_test,$(TEST_DEPS))
 dep_cowboy_commit = 1.0.3
 
 # FIXME: Add Ranch as a BUILD_DEPS to be sure the correct version is picked.
@@ -26,12 +27,8 @@ include erlang.mk
 # Compilation.
 # --------------------------------------------------------------------
 
-ERTS_VER = $(shell erl -version 2>&1 | sed -E 's/.* version //')
-USE_SPECS_MIN_ERTS_VER = 6.0
-ifeq ($(call compare_version,$(ERTS_VER),$(USE_SPECS_MIN_ERTS_VER),<),true)
-SOCKJS_ERLC_OPTS += -Dpre17_type_specs
+SOCKJS_ERLC_OPTS += $(RMQ_ERLC_OPTS)
 export SOCKJS_ERLC_OPTS
-endif
 
 .PHONY: patch-sockjs
 patch-sockjs: $(DEPS_DIR)/sockjs
@@ -40,9 +37,3 @@ patch-sockjs: $(DEPS_DIR)/sockjs
                echo >> $(DEPS_DIR)/sockjs/Makefile; \
                echo 'ERLC_OPTS += $$(SOCKJS_ERLC_OPTS)' >> $(DEPS_DIR)/sockjs/Makefile; \
        fi
-
-# --------------------------------------------------------------------
-# Testing.
-# --------------------------------------------------------------------
-
-WITH_BROKER_TEST_COMMANDS := rabbit_ws_test_all:all_tests()
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index 13340f616b742eaf29eecd4e0f6d5e808bce7f9c..00ea2d6c1040f8b26aefcb1ab04055c719f935f7 100644 (file)
@@ -26,7 +26,7 @@
 -export([init/1, handle_call/3, handle_info/2, terminate/2,
          code_change/3, handle_cast/2]).
 
--record(state, {conn, proc_state, parse_state}).
+-record(state, {conn, proc_state, parse_state, stats_timer, connection, heartbeat_mode, heartbeat, heartbeat_sup}).
 
 %%----------------------------------------------------------------------------
 
@@ -44,57 +44,71 @@ sockjs_closed(Pid) ->
 init({SupPid, Conn, Heartbeat, Conn}) ->
     ok = file_handle_cache:obtain(),
     process_flag(trap_exit, true),
-    {ok, ProcessorState} = init_processor_state(SupPid, Conn, Heartbeat),
-    {ok, #state{conn        = Conn,
-                proc_state  = ProcessorState,
-                parse_state = rabbit_stomp_frame:initial_state()}}.
-
-init_processor_state(SupPid, Conn, Heartbeat) ->
-    StompConfig = #stomp_configuration{implicit_connect = false},
-
+    {ok, ProcessorState} = init_processor_state(Conn),
+    {ok, rabbit_event:init_stats_timer(
+           #state{conn           = Conn,
+                  proc_state     = ProcessorState,
+                  parse_state    = rabbit_stomp_frame:initial_state(),
+                  heartbeat_sup  = SupPid,
+                  heartbeat      = {none, none},
+                  heartbeat_mode = Heartbeat},
+           #state.stats_timer)}.
+
+init_processor_state(Conn) ->
     SendFun = fun (_Sync, Data) ->
                       Conn:send(Data),
                       ok
               end,
-    Pid = self(),
-    ReceiveFun = fun() -> gen_server:cast(Pid, client_timeout) end,
     Info = Conn:info(),
-    Sock = proplists:get_value(socket, Info),
-    {PeerAddr, PeerPort} = proplists:get_value(peername, Info),
-    {SockAddr, SockPort} = proplists:get_value(sockname, Info),
-    Name = rabbit_misc:format("~s:~b -> ~s:~b",
-                              [rabbit_misc:ntoa(PeerAddr), PeerPort,
-                               rabbit_misc:ntoa(SockAddr), SockPort]),
-    AdapterInfo = #amqp_adapter_info{protocol        = {'Web STOMP', 0},
-                                     host            = SockAddr,
-                                     port            = SockPort,
-                                     peer_host       = PeerAddr,
-                                     peer_port       = PeerPort,
-                                     name            = list_to_binary(Name),
-                                     additional_info = [{ssl, rabbit_net:is_ssl(Sock)}]},
-
-    StartHeartbeatFun = case Heartbeat of
-        heartbeat ->
-            fun (SendTimeout, SendFin, ReceiveTimeout, ReceiveFin) ->
-                    rabbit_heartbeat:start(SupPid, Sock, SendTimeout,
-                                           SendFin, ReceiveTimeout, ReceiveFin)
-            end;
-        no_heartbeat ->
-            undefined
+    Headers = proplists:get_value(headers, Info),
+
+    UseHTTPAuth = application:get_env(rabbitmq_web_stomp, use_http_auth, false),
+    StompConfig0 = #stomp_configuration{implicit_connect = false},
+
+    StompConfig = case UseHTTPAuth of
+        true ->
+            {Login, PassCode} = case lists:keyfind(authorization, 1, Headers) of
+                false ->
+                    %% We fall back to the default STOMP credentials.
+                    UserConfig = application:get_env(rabbitmq_stomp,
+                                                     default_user, []),
+                    {proplists:get_value(login, UserConfig),
+                     proplists:get_value(passcode, UserConfig)};
+                {_, AuthHd} ->
+                    {<<"basic">>, {HTTPLogin, HTTPPassCode}}
+                        = cowboy_http:token_ci(list_to_binary(AuthHd),
+                                               fun cowboy_http:authorization/2),
+                    {HTTPLogin, HTTPPassCode}
+            end,
+            StompConfig0#stomp_configuration{default_login = Login,
+                                             default_passcode = PassCode,
+                                             force_default_creds = true};
+        false ->
+            StompConfig0
     end,
 
+    Sock = proplists:get_value(socket, Info),
+    {PeerAddr, _} = proplists:get_value(peername, Info),
+    AdapterInfo0 = #amqp_adapter_info{additional_info=Extra}
+        = amqp_connection:socket_adapter_info(Sock, {'Web STOMP', 0}),
+    %% Flow control is not supported for Web-STOMP connections.
+    AdapterInfo = AdapterInfo0#amqp_adapter_info{
+        additional_info=[{state, running}|Extra]},
+
     ProcessorState = rabbit_stomp_processor:initial_state(
         StompConfig, 
-        {SendFun, ReceiveFun, AdapterInfo, StartHeartbeatFun, none, PeerAddr}),
+        {SendFun, AdapterInfo, none, PeerAddr}),
     {ok, ProcessorState}.
 
 handle_cast({sockjs_msg, Data}, State = #state{proc_state  = ProcessorState,
-                                               parse_state = ParseState}) ->
-    case process_received_bytes(Data, ProcessorState, ParseState) of
-        {ok, NewProcState, ParseState1} ->
-            {noreply, State#state{
-                            parse_state = ParseState1,
-                            proc_state  = NewProcState}};
+                                               parse_state = ParseState,
+                                               connection  = ConnPid}) ->
+    case process_received_bytes(Data, ProcessorState, ParseState, ConnPid) of
+        {ok, NewProcState, ParseState1, ConnPid1} ->
+            {noreply, ensure_stats_timer(State#state{
+                        parse_state = ParseState1,
+                        proc_state  = NewProcState,
+                        connection  = ConnPid1})};
         {stop, Reason, NewProcState, ParseState1} ->
             {stop, Reason, State#state{
                                 parse_state = ParseState1,
@@ -146,6 +160,27 @@ handle_info(#'basic.cancel'{consumer_tag = Ctag}, State) ->
         {stop, Reason, processor_state(NewProcState, State)}
     end;
 
+handle_info({start_heartbeats, _}, 
+            State = #state{heartbeat_mode = no_heartbeat}) ->
+    {noreply, State};
+
+handle_info({start_heartbeats, {0, 0}}, State) -> 
+    {noreply, State};
+handle_info({start_heartbeats, {SendTimeout, ReceiveTimeout}},
+            State = #state{conn = Conn, 
+                           heartbeat_sup = SupPid,
+                           heartbeat_mode = heartbeat}) ->
+    Info = Conn:info(),
+    Sock = proplists:get_value(socket, Info),
+    Pid = self(),
+    SendFun = fun () -> Conn:send(<<$\n>>), ok end,
+    ReceiveFun = fun() -> gen_server2:cast(Pid, client_timeout) end,
+    Heartbeat = rabbit_heartbeat:start(SupPid, Sock, SendTimeout,
+                                       SendFun, ReceiveTimeout, ReceiveFun),
+    {noreply, State#state{heartbeat = Heartbeat}};
+
+
+
 %%----------------------------------------------------------------------------
 handle_info({'EXIT', From, Reason}, State) ->
   ProcState = processor_state(State),
@@ -157,6 +192,8 @@ handle_info({'EXIT', From, Reason}, State) ->
   end;
 %%----------------------------------------------------------------------------
 
+handle_info(emit_stats, State) ->
+    {noreply, emit_stats(State)};
 
 handle_info(Info, State) ->
     {stop, {odd_info, Info}, State}.
@@ -166,7 +203,8 @@ handle_info(Info, State) ->
 handle_call(Request, _From, State) ->
     {stop, {odd_request, Request}, State}.
 
-terminate(_Reason, #state{conn = Conn, proc_state = ProcessorState}) ->
+terminate(_Reason, State = #state{conn = Conn, proc_state = ProcessorState}) ->
+    maybe_emit_stats(State),
     ok = file_handle_cache:release(),
     rabbit_stomp_processor:flush_and_die(ProcessorState),
     Conn:close(1000, "STOMP died"),
@@ -179,21 +217,42 @@ code_change(_OldVsn, State, _Extra) ->
 %%----------------------------------------------------------------------------
 
 
-process_received_bytes(Bytes, ProcessorState, ParseState) ->
+process_received_bytes(Bytes, ProcessorState, ParseState, ConnPid) ->
     case rabbit_stomp_frame:parse(Bytes, ParseState) of
         {ok, Frame, Rest} ->
             case rabbit_stomp_processor:process_frame(Frame, ProcessorState) of
-                {ok, NewProcState} ->
+                {ok, NewProcState, ConnPid1} ->
                     ParseState1 = rabbit_stomp_frame:initial_state(),
-                    process_received_bytes(Rest, NewProcState, ParseState1);
+                    process_received_bytes(Rest, NewProcState, ParseState1, ConnPid1);
                 {stop, Reason, NewProcState} ->
                     {stop, Reason, NewProcState, ParseState}
             end;
         {more, ParseState1} ->
-            {ok, ProcessorState, ParseState1}
+            {ok, ProcessorState, ParseState1, ConnPid}
     end.
 
 processor_state(#state{ proc_state = ProcState }) -> ProcState.
 processor_state(ProcState, #state{} = State) -> 
   State#state{ proc_state = ProcState}.
 
+%%----------------------------------------------------------------------------
+
+ensure_stats_timer(State) ->
+    rabbit_event:ensure_stats_timer(State, #state.stats_timer, emit_stats).
+
+maybe_emit_stats(State) ->
+    rabbit_event:if_enabled(State, #state.stats_timer,
+                                fun() -> emit_stats(State) end).
+
+emit_stats(State=#state{conn=Conn, connection=ConnPid}) ->
+    Info = Conn:info(),
+    Sock = proplists:get_value(socket, Info),
+    SockInfos = case rabbit_net:getstat(Sock,
+            [recv_oct, recv_cnt, send_oct, send_cnt, send_pend]) of
+        {ok,    SI} -> SI;
+        {error,  _} -> []
+    end,
+    Infos = [{pid, ConnPid}|SockInfos],
+    rabbit_event:notify(connection_stats, Infos),
+    State1 = rabbit_event:reset_stats_timer(State, #state.stats_timer),
+    State1.
index 6677b8000d1481b4a71f1ddce0562b166009e16a..57c3dc42c3f6618ada36beebfce96b15eb100e65 100644 (file)
@@ -34,7 +34,7 @@ start_client({Conn, Heartbeat}) ->
 
 client_spec(SupPid, Conn, Heartbeat, Conn) ->
     {rabbit_ws_client, {rabbit_ws_client, start_link, [{SupPid, Conn, Heartbeat, Conn}]},
-     intrinsic, ?MAX_WAIT, worker, [rabbit_ws_client]}.
+     intrinsic, ?WORKER_WAIT, worker, [rabbit_ws_client]}.
 
 init(_Any) ->
     {ok, {{one_for_all, 0, 1}, []}}.
index 0a480196a7ac2b67636755982e4cb29b8795972c..785814ae4d851c91cbf5cd23dd18b6ad6c094c62 100644 (file)
 init(_, _Req, _Opts) ->
     {upgrade, protocol, cowboy_websocket}.
 
-websocket_init(_TransportName, Req, [{type, FrameType}]) ->
+websocket_init(_TransportName, Req0, [{type, FrameType}]) ->
+    Req = case cowboy_req:header(<<"sec-websocket-protocol">>, Req0) of
+        {undefined, _} -> Req0;
+        {ProtocolHd, _} ->
+            Protocols = parse_sec_websocket_protocol_req(ProtocolHd),
+            case filter_stomp_protocols(Protocols) of
+                [] -> Req0;
+                [StompProtocol|_] ->
+                    cowboy_req:set_resp_header(<<"sec-websocket-protocol">>,
+                        StompProtocol, Req0)
+            end
+    end,
     {Peername, _} = cowboy_req:peer(Req),
     [Socket, Transport] = cowboy_req:get([socket, transport], Req),
     {ok, Sockname} = Transport:sockname(Socket),
+    Headers = case cowboy_req:header(<<"authorization">>, Req) of
+        {undefined, _} -> [];
+        {AuthHd, _}    -> [{authorization, binary_to_list(AuthHd)}]
+    end,
     Conn = {?MODULE, self(), [
         {socket, Socket},
         {peername, Peername},
-        {sockname, Sockname}]},
+        {sockname, Sockname},
+        {headers, Headers}]},
     {ok, _Sup, Pid} = rabbit_ws_sup:start_client({Conn, heartbeat}),
     {ok, Req, #state{pid=Pid, type=FrameType}}.
 
@@ -67,6 +83,25 @@ websocket_terminate(_Reason, _Req, #state{pid=Pid}) ->
     rabbit_ws_client:sockjs_closed(Pid),
     ok.
 
+%% When moving to Cowboy 2, this code should be replaced
+%% with a simple call to cow_http_hd:parse_sec_websocket_protocol_req/1.
+
+parse_sec_websocket_protocol_req(Bin) ->
+    Protocols = binary:split(Bin, [<<$,>>, <<$\s>>], [global]),
+    [P || P <- Protocols, P =/= <<>>].
+
+%% The protocols v10.stomp, v11.stomp and v12.stomp are registered
+%% at IANA: https://www.iana.org/assignments/websocket/websocket.xhtml
+
+filter_stomp_protocols(Protocols) ->
+    lists:reverse(lists:sort(lists:filter(
+        fun(<< "v1", C, ".stomp">>)
+            when C =:= $2; C =:= $1; C =:= $0 -> true;
+           (_) ->
+            false
+        end,
+        Protocols))).
+
 %% SockJS connection handling.
 
 %% The following functions are replicating the functionality
index fe660274060ce99492faccd93fd78842c0cdafc6..46be5f02aa71f1c953a39fdd4a7f29f222aa7b97 100644 (file)
@@ -1,7 +1,7 @@
 {application, rabbitmq_web_stomp,
  [
   {description, "Rabbit WEB-STOMP - WebSockets to Stomp adapter"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_ws_app, []}},
@@ -12,6 +12,7 @@
          {num_ssl_acceptors, 1},
          {cowboy_opts, []},
          {sockjs_opts, []},
-         {ws_frame, text}]},
+         {ws_frame, text},
+         {use_http_auth, false}]},
   {applications, [kernel, stdlib, rabbit, rabbitmq_stomp, cowboy, sockjs]}
  ]}.
similarity index 51%
rename from rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_cowboy_websocket.erl
rename to rabbitmq-server/deps/rabbitmq_web_stomp/test/cowboy_websocket_SUITE.erl
index b60ead65f381f09f9cf749a0a4049c9a4f39e3f0..445227e7b0dad37111e9b37179a90ba38ff9a7fa 100644 (file)
 %%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
 %%
 
--module(rabbit_ws_test_cowboy_websocket).
+-module(cowboy_websocket_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
 
 -include_lib("eunit/include/eunit.hrl").
 
-connection_test() ->
-    WS = rfc6455_client:new("ws://127.0.0.1:15674/ws", self()),
+all() ->
+    [
+    connection,
+    pubsub,
+    pubsub_binary,
+    disconnect,
+    http_auth
+    ].
+
+init_per_suite(Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config,
+                                           [{rmq_nodename_suffix, ?MODULE}]),
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+                                      rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(pubsub_binary, Config) ->
+    rabbit_ws_test_util:update_app_env(Config, ws_frame, binary),
+    Config;
+init_per_testcase(http_auth, Config) ->
+    rabbit_ws_test_util:update_app_env(Config, use_http_auth, true),
+    Config;
+init_per_testcase(_, Config) -> Config.
+
+end_per_testcase(pubsub_binary, Config) ->
+    rabbit_ws_test_util:update_app_env(Config, ws_frame, text),
+    Config;
+end_per_testcase(http_auth, Config) ->
+    rabbit_ws_test_util:update_app_env(Config, use_http_auth, false),
+    Config;
+end_per_testcase(_, Config) -> Config.
+
+
+connection(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
     {ok, _} = rfc6455_client:open(WS),
     {close, _} = rfc6455_client:close(WS),
     ok.
@@ -36,8 +77,9 @@ raw_recv(WS) ->
     stomp:unmarshal(P).
 
 
-pubsub_test() ->
-    WS = rfc6455_client:new("ws://127.0.0.1:15674/ws", self()),
+pubsub(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
     {ok, _} = rfc6455_client:open(WS),
     ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
 
@@ -69,14 +111,9 @@ raw_recv_binary(WS) ->
     stomp:unmarshal(P).
 
 
-pubsub_binary_test() ->
-    %% Set frame type to binary and restart the web stomp application.
-    ok = application:set_env(rabbitmq_web_stomp, ws_frame, binary),
-    ok = application:stop(rabbitmq_web_stomp),
-    ok = cowboy:stop_listener(http),
-    ok = application:start(rabbitmq_web_stomp),
-
-    WS = rfc6455_client:new("ws://127.0.0.1:15674/ws", self()),
+pubsub_binary(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
     {ok, _} = rfc6455_client:open(WS),
     ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
 
@@ -93,17 +130,11 @@ pubsub_binary_test() ->
     {<<"MESSAGE">>, H, <<"a\x00a">>} = raw_recv_binary(WS),
     Dst = binary_to_list(proplists:get_value(<<"destination">>, H)),
 
-    {close, _} = rfc6455_client:close(WS),
-
-    %% Set frame type back to text and restart the web stomp application.
-    ok = application:set_env(rabbitmq_web_stomp, ws_frame, text),
-    ok = application:stop(rabbitmq_web_stomp),
-    ok = cowboy:stop_listener(http),
-    ok = application:start(rabbitmq_web_stomp).
-
+    {close, _} = rfc6455_client:close(WS).
 
-disconnect_test() ->
-    WS = rfc6455_client:new("ws://127.0.0.1:15674/ws", self()),
+disconnect(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self()),
     {ok, _} = rfc6455_client:open(WS),
     ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
 
@@ -113,3 +144,25 @@ disconnect_test() ->
     {close, {1000, _}} = rfc6455_client:recv(WS),
 
     ok.
+
+http_auth(Config) ->
+    %% Intentionally put bad credentials in the CONNECT frame,
+    %% and good credentials in the Authorization header, to
+    %% confirm that the right credentials are picked.
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/ws", self(),
+        [{login, "guest"}, {passcode, "guest"}]),
+    {ok, _} = rfc6455_client:open(WS),
+    ok = raw_send(WS, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+    {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+    {close, _} = rfc6455_client:close(WS),
+
+    %% Confirm that if no Authorization header is provided,
+    %% the default STOMP plugin credentials are used. We
+    %% expect an error because the default credentials are
+    %% left undefined.
+    WS2 = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/websocket", self()),
+    {ok, _} = rfc6455_client:open(WS2),
+    ok = raw_send(WS2, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+    {<<"ERROR">>, _, _} = raw_recv(WS2),
+    {close, _} = rfc6455_client:close(WS2).
\ No newline at end of file
diff --git a/rabbitmq-server/deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl b/rabbitmq-server/deps/rabbitmq_web_stomp/test/raw_websocket_SUITE.erl
new file mode 100644 (file)
index 0000000..8b5a881
--- /dev/null
@@ -0,0 +1,138 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ Management Console.
+%%
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(raw_websocket_SUITE).
+
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+    [
+    connection,
+    connection_with_protocols,
+    pubsub,
+    disconnect,
+    http_auth
+    ].
+
+init_per_suite(Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config,
+                                           [{rmq_nodename_suffix, ?MODULE}]),
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+                                      rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(http_auth, Config) ->
+    rabbit_ws_test_util:update_app_env(Config, use_http_auth, true),
+    Config;
+init_per_testcase(_, Config) -> Config.
+
+end_per_testcase(http_auth, Config) ->
+    rabbit_ws_test_util:update_app_env(Config, use_http_auth, false),
+    Config;
+end_per_testcase(_, Config) -> Config.
+
+connection(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/websocket", self()),
+    {ok, _} = rfc6455_client:open(WS),
+    {close, _} = rfc6455_client:close(WS),
+    ok.
+
+connection_with_protocols(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/websocket", self(),
+        undefined, ["v11.stomp", "v10.stomp", "v12.stomp"]),
+    {ok, _} = rfc6455_client:open(WS),
+    {close, _} = rfc6455_client:close(WS),
+    ok.
+
+
+raw_send(WS, Command, Headers) ->
+    raw_send(WS, Command, Headers, <<>>).
+raw_send(WS, Command, Headers, Body) ->
+    Frame = stomp:marshal(Command, Headers, Body),
+    rfc6455_client:send(WS, Frame).
+
+raw_recv(WS) ->
+    {ok, P} = rfc6455_client:recv(WS),
+    stomp:unmarshal(P).
+
+
+pubsub(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/websocket", self()),
+    {ok, _} = rfc6455_client:open(WS),
+    ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
+
+    {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+
+    Dst = "/topic/test-" ++ stomp:list_to_hex(binary_to_list(crypto:rand_bytes(8))),
+
+    ok = raw_send(WS, "SUBSCRIBE", [{"destination", Dst},
+                                    {"id", "s0"}]),
+
+    ok = raw_send(WS, "SEND", [{"destination", Dst},
+                              {"content-length", "3"}], <<"a\x00a">>),
+
+    {<<"MESSAGE">>, H, <<"a\x00a">>} = raw_recv(WS),
+    Dst = binary_to_list(proplists:get_value(<<"destination">>, H)),
+
+    {close, _} = rfc6455_client:close(WS),
+    ok.
+
+
+disconnect(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/websocket", self()),
+    {ok, _} = rfc6455_client:open(WS),
+    ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
+
+    {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+
+    ok = raw_send(WS, "DISCONNECT", []),
+    {close, {1000, _}} = rfc6455_client:recv(WS),
+
+    ok.
+
+http_auth(Config) ->
+    %% Intentionally put bad credentials in the CONNECT frame,
+    %% and good credentials in the Authorization header, to
+    %% confirm that the right credentials are picked.
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/websocket", self(),
+        [{login, "guest"}, {passcode, "guest"}]),
+    {ok, _} = rfc6455_client:open(WS),
+    ok = raw_send(WS, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+    {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
+    {close, _} = rfc6455_client:close(WS),
+
+    %% Confirm that if no Authorization header is provided,
+    %% the default STOMP plugin credentials are used. We
+    %% expect an error because the default credentials are
+    %% left undefined.
+    WS2 = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/websocket", self()),
+    {ok, _} = rfc6455_client:open(WS2),
+    ok = raw_send(WS2, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+    {<<"ERROR">>, _, _} = raw_recv(WS2),
+    {close, _} = rfc6455_client:close(WS2).
diff --git a/rabbitmq-server/deps/rabbitmq_web_stomp/test/sockjs_websocket_SUITE.erl b/rabbitmq-server/deps/rabbitmq_web_stomp/test/sockjs_websocket_SUITE.erl
new file mode 100644 (file)
index 0000000..7aad557
--- /dev/null
@@ -0,0 +1,143 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ Management Console.
+%%
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(sockjs_websocket_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+-include_lib("eunit/include/eunit.hrl").
+
+all() ->
+    [
+    connection,
+    pubsub,
+    disconnect,
+    http_auth
+    ].
+
+init_per_suite(Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config,
+                                           [{rmq_nodename_suffix, ?MODULE}]),
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config1,
+                                      rabbit_ct_broker_helpers:setup_steps()).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_testcase(http_auth, Config) ->
+    rabbit_ws_test_util:update_app_env(Config, use_http_auth, true),
+    Config;
+init_per_testcase(_, Config) -> Config.
+
+end_per_testcase(http_auth, Config) ->
+    rabbit_ws_test_util:update_app_env(Config, use_http_auth, false),
+    Config;
+end_per_testcase(_, Config) -> Config.
+
+connection(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/0/0/websocket", self()),
+    {ok, _} = rfc6455_client:open(WS),
+    {ok, <<"o">>} = rfc6455_client:recv(WS),
+
+    {close, _} = rfc6455_client:close(WS),
+    ok.
+
+
+sjs_send(WS, Command, Headers) ->
+    sjs_send(WS, Command, Headers, <<>>).
+sjs_send(WS, Command, Headers, Body) ->
+    StompFrame = stomp:marshal(Command, Headers, Body),
+    SockJSFrame = sockjs_json:encode([StompFrame]),
+    rfc6455_client:send(WS, SockJSFrame).
+
+sjs_recv(WS) ->
+    {ok, P} = rfc6455_client:recv(WS),
+    case P of
+        <<"a", JsonArr/binary>> ->
+            {ok, [StompFrame]} = sockjs_json:decode(JsonArr),
+            {ok, stomp:unmarshal(StompFrame)};
+        <<"c", JsonArr/binary>> ->
+            {ok, CloseReason} = sockjs_json:decode(JsonArr),
+            {close, CloseReason}
+    end.
+
+pubsub(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/0/0/websocket", self()),
+    {ok, _} = rfc6455_client:open(WS),
+    {ok, <<"o">>} = rfc6455_client:recv(WS),
+
+    ok = sjs_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
+
+    {ok, {<<"CONNECTED">>, _, <<>>}} = sjs_recv(WS),
+
+    Dst = "/topic/test-" ++ stomp:list_to_hex(binary_to_list(crypto:rand_bytes(8))),
+
+    ok = sjs_send(WS, "SUBSCRIBE", [{"destination", Dst},
+                                    {"id", "s0"}]),
+
+    ok = sjs_send(WS, "SEND", [{"destination", Dst},
+                               {"content-length", "3"}], <<"a\x00a">>),
+
+    {ok, {<<"MESSAGE">>, H, <<"a\x00a">>}} = sjs_recv(WS),
+    Dst = binary_to_list(proplists:get_value(<<"destination">>, H)),
+
+    {close, _} = rfc6455_client:close(WS),
+
+    ok.
+
+
+disconnect(Config) ->
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/0/0/websocket", self()),
+    {ok, _} = rfc6455_client:open(WS),
+    {ok, <<"o">>} = rfc6455_client:recv(WS),
+
+    ok = sjs_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
+    {ok, {<<"CONNECTED">>, _, <<>>}} = sjs_recv(WS),
+
+    ok = sjs_send(WS, "DISCONNECT", []),
+    {close, [1000, _]} = sjs_recv(WS),
+
+    ok.
+
+http_auth(Config) ->
+    %% Intentionally put bad credentials in the CONNECT frame,
+    %% and good credentials in the Authorization header, to
+    %% confirm that the right credentials are picked.
+    PortStr = rabbit_ws_test_util:get_web_stomp_port_str(Config),
+    WS = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/0/0/websocket", self(),
+        [{login, "guest"}, {passcode, "guest"}]),
+    {ok, _} = rfc6455_client:open(WS),
+    {ok, <<"o">>} = rfc6455_client:recv(WS),
+    ok = sjs_send(WS, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+    {ok, {<<"CONNECTED">>, _, <<>>}} = sjs_recv(WS),
+    {close, _} = rfc6455_client:close(WS),
+
+    %% Confirm that if no Authorization header is provided,
+    %% the default STOMP plugin credentials are used. We
+    %% expect an error because the default credentials are
+    %% left undefined.
+    WS2 = rfc6455_client:new("ws://127.0.0.1:" ++ PortStr ++ "/stomp/0/0/websocket", self()),
+    {ok, _} = rfc6455_client:open(WS2),
+    {ok, <<"o">>} = rfc6455_client:recv(WS2),
+    ok = sjs_send(WS2, "CONNECT", [{"login", "bad"}, {"passcode", "bad"}]),
+    {ok, {<<"ERROR">>, _, _}} = sjs_recv(WS2),
+    {close, _} = rfc6455_client:close(WS2).
diff --git a/rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_raw_websocket.erl b/rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_raw_websocket.erl
deleted file mode 100644 (file)
index 9943e54..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
-%%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
-%%
-%%   The Original Code is RabbitMQ Management Console.
-%%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_ws_test_raw_websocket).
-
--include_lib("eunit/include/eunit.hrl").
-
-connection_test() ->
-    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/websocket", self()),
-    {ok, _} = rfc6455_client:open(WS),
-    {close, _} = rfc6455_client:close(WS),
-    ok.
-
-
-raw_send(WS, Command, Headers) ->
-    raw_send(WS, Command, Headers, <<>>).
-raw_send(WS, Command, Headers, Body) ->
-    Frame = stomp:marshal(Command, Headers, Body),
-    rfc6455_client:send(WS, Frame).
-
-raw_recv(WS) ->
-    {ok, P} = rfc6455_client:recv(WS),
-    stomp:unmarshal(P).
-
-
-pubsub_test() ->
-    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/websocket", self()),
-    {ok, _} = rfc6455_client:open(WS),
-    ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
-
-    {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
-
-    Dst = "/topic/test-" ++ stomp:list_to_hex(binary_to_list(crypto:rand_bytes(8))),
-
-    ok = raw_send(WS, "SUBSCRIBE", [{"destination", Dst},
-                                    {"id", "s0"}]),
-
-    ok = raw_send(WS, "SEND", [{"destination", Dst},
-                              {"content-length", "3"}], <<"a\x00a">>),
-
-    {<<"MESSAGE">>, H, <<"a\x00a">>} = raw_recv(WS),
-    Dst = binary_to_list(proplists:get_value(<<"destination">>, H)),
-
-    {close, _} = rfc6455_client:close(WS),
-    ok.
-
-
-disconnect_test() ->
-    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/websocket", self()),
-    {ok, _} = rfc6455_client:open(WS),
-    ok = raw_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
-
-    {<<"CONNECTED">>, _, <<>>} = raw_recv(WS),
-
-    ok = raw_send(WS, "DISCONNECT", []),
-    {close, {1000, _}} = rfc6455_client:recv(WS),
-
-    ok.
diff --git a/rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_sockjs_websocket.erl b/rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_sockjs_websocket.erl
deleted file mode 100644 (file)
index ed0beb3..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-%%   The contents of this file are subject to the Mozilla Public License
-%%   Version 1.1 (the "License"); you may not use this file except in
-%%   compliance with the License. You may obtain a copy of the License at
-%%   http://www.mozilla.org/MPL/
-%%
-%%   Software distributed under the License is distributed on an "AS IS"
-%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%%   License for the specific language governing rights and limitations
-%%   under the License.
-%%
-%%   The Original Code is RabbitMQ Management Console.
-%%
-%%   The Initial Developer of the Original Code is GoPivotal, Inc.
-%%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
-%%
-
--module(rabbit_ws_test_sockjs_websocket).
-
--include_lib("eunit/include/eunit.hrl").
-
-connection_test() ->
-    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/0/0/websocket", self()),
-    {ok, _} = rfc6455_client:open(WS),
-    {ok, <<"o">>} = rfc6455_client:recv(WS),
-
-    {close, _} = rfc6455_client:close(WS),
-    ok.
-
-
-sjs_send(WS, Command, Headers) ->
-    sjs_send(WS, Command, Headers, <<>>).
-sjs_send(WS, Command, Headers, Body) ->
-    StompFrame = stomp:marshal(Command, Headers, Body),
-    SockJSFrame = sockjs_json:encode([StompFrame]),
-    rfc6455_client:send(WS, SockJSFrame).
-
-sjs_recv(WS) ->
-    {ok, P} = rfc6455_client:recv(WS),
-    case P of
-        <<"a", JsonArr/binary>> ->
-            {ok, [StompFrame]} = sockjs_json:decode(JsonArr),
-            {ok, stomp:unmarshal(StompFrame)};
-        <<"c", JsonArr/binary>> ->
-            {ok, CloseReason} = sockjs_json:decode(JsonArr),
-            {close, CloseReason}
-    end.
-
-pubsub_test() ->
-    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/0/0/websocket", self()),
-    {ok, _} = rfc6455_client:open(WS),
-    {ok, <<"o">>} = rfc6455_client:recv(WS),
-
-    ok = sjs_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
-
-    {ok, {<<"CONNECTED">>, _, <<>>}} = sjs_recv(WS),
-
-    Dst = "/topic/test-" ++ stomp:list_to_hex(binary_to_list(crypto:rand_bytes(8))),
-
-    ok = sjs_send(WS, "SUBSCRIBE", [{"destination", Dst},
-                                    {"id", "s0"}]),
-
-    ok = sjs_send(WS, "SEND", [{"destination", Dst},
-                               {"content-length", "3"}], <<"a\x00a">>),
-
-    {ok, {<<"MESSAGE">>, H, <<"a\x00a">>}} = sjs_recv(WS),
-    Dst = binary_to_list(proplists:get_value(<<"destination">>, H)),
-
-    {close, _} = rfc6455_client:close(WS),
-
-    ok.
-
-
-disconnect_test() ->
-    WS = rfc6455_client:new("ws://127.0.0.1:15674/stomp/0/0/websocket", self()),
-    {ok, _} = rfc6455_client:open(WS),
-    {ok, <<"o">>} = rfc6455_client:recv(WS),
-
-    ok = sjs_send(WS, "CONNECT", [{"login","guest"}, {"passcode", "guest"}]),
-    {ok, {<<"CONNECTED">>, _, <<>>}} = sjs_recv(WS),
-
-    ok = sjs_send(WS, "DISCONNECT", []),
-    {close, [1000, _]} = sjs_recv(WS),
-
-    ok.
-
diff --git a/rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl b/rabbitmq-server/deps/rabbitmq_web_stomp/test/src/rabbit_ws_test_util.erl
new file mode 100644 (file)
index 0000000..465565c
--- /dev/null
@@ -0,0 +1,38 @@
+%%   The contents of this file are subject to the Mozilla Public License
+%%   Version 1.1 (the "License"); you may not use this file except in
+%%   compliance with the License. You may obtain a copy of the License at
+%%   http://www.mozilla.org/MPL/
+%%
+%%   Software distributed under the License is distributed on an "AS IS"
+%%   basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%%   License for the specific language governing rights and limitations
+%%   under the License.
+%%
+%%   The Original Code is RabbitMQ Management Console.
+%%
+%%   The Initial Developer of the Original Code is GoPivotal, Inc.
+%%   Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(rabbit_ws_test_util).
+
+-export([update_app_env/3, get_web_stomp_port_str/1]).
+
+update_app_env(Config, Key, Val) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+                                      application, set_env,
+                                      [rabbitmq_web_stomp, Key, Val]),
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+                                      application, stop,
+                                      [rabbitmq_web_stomp]),
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+                                      cowboy, stop_listener,
+                                      [http]),
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+                                      application, start,
+                                      [rabbitmq_web_stomp]).
+
+get_web_stomp_port_str(Config) ->
+    Port = rabbit_ct_broker_helpers:get_node_config(Config, 0,
+                                                    tcp_port_web_stomp),
+    integer_to_list(Port).
\ No newline at end of file
index eb5b309216b8bacd919556a4f186cc896f5edb11..0a6f9cd0e21aef52cc72cd0297d481e2806d7915 100644 (file)
 
 -module(rfc6455_client).
 
--export([new/2, open/1, recv/1, send/2, close/1, close/2]).
+-export([new/2, new/3, new/4, open/1, recv/1, send/2, close/1, close/2]).
 
 -record(state, {host, port, addr, path, ppid, socket, data, phase}).
 
 %% --------------------------------------------------------------------------
 
 new(WsUrl, PPid) ->
+    new(WsUrl, PPid, undefined, []).
+
+new(WsUrl, PPid, AuthInfo) ->
+    new(WsUrl, PPid, AuthInfo, []).
+
+new(WsUrl, PPid, AuthInfo, Protocols) ->
     crypto:start(),
     "ws://" ++ Rest = WsUrl,
     [Addr, Path] = split("/", Rest, 1),
@@ -37,7 +43,7 @@ new(WsUrl, PPid) ->
                    path = "/" ++ Path,
                    ppid = PPid},
     spawn(fun () ->
-                  start_conn(State)
+                  start_conn(State, AuthInfo, Protocols)
           end).
 
 open(WS) ->
@@ -79,16 +85,34 @@ close(WS, WsReason) ->
 
 %% --------------------------------------------------------------------------
 
-start_conn(State) ->
+start_conn(State, AuthInfo, Protocols) ->
     {ok, Socket} = gen_tcp:connect(State#state.host, State#state.port,
                                    [binary,
                                     {packet, 0}]),
+
+    AuthHd = case AuthInfo of
+        undefined -> "";
+        _ ->
+            Login    = proplists:get_value(login, AuthInfo),
+            Passcode = proplists:get_value(passcode, AuthInfo),
+            "Authorization: Basic "
+                ++ base64:encode_to_string(Login ++ ":" ++ Passcode)
+                ++ "\r\n"
+    end,
+
+    ProtocolHd = case Protocols of
+        [] -> "";
+        _  -> "Sec-Websocket-Protocol: " ++ string:join(Protocols, ", ")
+    end,
+
     Key = base64:encode_to_string(crypto:rand_bytes(16)),
     gen_tcp:send(Socket,
         "GET " ++ State#state.path ++ " HTTP/1.1\r\n" ++
         "Host: " ++ State#state.addr ++ "\r\n" ++
         "Upgrade: websocket\r\n" ++
         "Connection: Upgrade\r\n" ++
+        AuthHd ++
+        ProtocolHd ++
         "Sec-WebSocket-Key: " ++ Key ++ "\r\n" ++
         "Origin: null\r\n" ++
         "Sec-WebSocket-Version: 13\r\n\r\n"),
diff --git a/rabbitmq-server/deps/rabbitmq_web_stomp_examples/CODE_OF_CONDUCT.md b/rabbitmq-server/deps/rabbitmq_web_stomp_examples/CODE_OF_CONDUCT.md
new file mode 100644 (file)
index 0000000..1f6ef1c
--- /dev/null
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+   without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
index 69a4b4a437fdf25c45c200610d780c7a009146be..45bbcbe62e74c1a8682d2097db8eec955d177b9c 100644 (file)
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
 of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
 
 
-## (Brief) Code of Conduct
+## Code of Conduct
 
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
 
 
 ## Contributor Agreement
index cd9c9b398c8db5da3c8b1f812048abab8030632b..9dc21c658ce3754c15f8c04f0cbd1f117d59019b 100644 (file)
 
       // Stomp.js boilerplate
       if (location.search == '?ws') {
-          var ws = new WebSocket('ws://' + window.location.hostname + ':15674/ws');
+          var client = Stomp.client('ws://' + window.location.hostname + ':15674/ws');
       } else {
           var ws = new SockJS('http://' + window.location.hostname + ':15674/stomp');
+          var client = Stomp.over(ws);
       }
-      var client = Stomp.over(ws);
 
       client.debug = pipe('#second');
 
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
index cd2cc7d37bc31d543219ef0f5a85bff5dfc8f9ea..3a6e7ce06d4c12f3b9c4da46042405a6cc38611e 100644 (file)
@@ -1,6 +1,6 @@
 {application, rabbitmq_web_stomp_examples,
  [{description, "Rabbit WEB-STOMP - examples"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, []},
   {mod, {rabbit_web_stomp_examples_app, []}},
diff --git a/rabbitmq-server/deps/ranch/ranch.d b/rabbitmq-server/deps/ranch/ranch.d
new file mode 100644 (file)
index 0000000..a5dc49d
--- /dev/null
@@ -0,0 +1,4 @@
+src/ranch_ssl.erl:: src/ranch_transport.erl; @touch $@
+src/ranch_tcp.erl:: src/ranch_transport.erl; @touch $@
+
+COMPILE_FIRST += ranch_transport
index ea8b91638db07642b0a25eee8ab0097401f32297..b6d139db603eb6467a17095b014280c1b834fc55 100644 (file)
@@ -223,7 +223,9 @@ extract_info(Req) ->
                                           end
                                   end, {[], Req2},
                                   ['referer', 'x-client-ip', 'x-forwarded-for',
-                                   'x-cluster-client-ip', 'via', 'x-real-ip']),
+                                   'x-cluster-client-ip', 'via', 'x-real-ip',
+    %% RabbitMQ-Web-STOMP needs this header for HTTP Basic Auth.
+                                   'authorization']),
     %% RabbitMQ-Management needs the socket to figure out if it is SSL/TLS.
     Socket  = cowboy_req:get(socket, element(2, Req3)),
     {[{peername, Peer},
index 35a1523ac3fafdf1b60f41178aecb549b8a19f3d..f26889b6408f6269d8a1a2a1a5ece51fbd755ade 100644 (file)
@@ -18,3 +18,7 @@ An example configuration file is provided in the same directory as
 this README. Copy it to /etc/rabbitmq/rabbitmq.config to use it. The
 RabbitMQ server must be restarted after changing the configuration
 file.
+
+An example policy file for HA queues is provided in the same directory
+as this README. Copy it to /usr/local/sbin/set_rabbitmq_policy and make
+it executable (chmod +x) to use it with the Pacemaker OCF RA.
diff --git a/rabbitmq-server/docs/rabbitmq-server.service.example b/rabbitmq-server/docs/rabbitmq-server.service.example
new file mode 100644 (file)
index 0000000..1aa6549
--- /dev/null
@@ -0,0 +1,18 @@
+# systemd unit example
+[Unit]
+Description=RabbitMQ broker
+After=network.target epmd@0.0.0.0.socket
+Wants=network.target epmd@0.0.0.0.socket
+
+[Service]
+Type=notify
+User=rabbitmq
+Group=rabbitmq
+NotifyAccess=all
+TimeoutStartSec=3600
+WorkingDirectory=/var/lib/rabbitmq
+ExecStart=/usr/lib/rabbitmq/bin/rabbitmq-server
+ExecStop=/usr/lib/rabbitmq/bin/rabbitmqctl stop
+
+[Install]
+WantedBy=multi-user.target
index fe24a473764779533246a8096cd28536d453c83b..ec864af6cf9f6183001f3cda807cca73e8851c5f 100644 (file)
             </para>
           </listitem>
         </varlistentry>
+
+        <varlistentry>
+          <term><cmdsynopsis><command>hipe_compile</command> <arg choice="req"><replaceable>directory</replaceable></arg></cmdsynopsis></term>
+          <listitem>
+            <para>
+              Performs HiPE compilation and caches the resulting
+              .beam files in the given directory.
+            </para>
+            <para>
+              Parent directories are created if necessary. Any
+              existing <command>.beam</command> files from the
+              directory are automatically deleted prior to
+              compilation.
+            </para>
+            <para>
+              To use these precompiled files, you should set the
+              <command>RABBITMQ_SERVER_CODE_PATH</command> environment
+              variable to the directory specified in the
+              <command>hipe_compile</command> invocation.
+            </para>
+            <para role="example-prefix">For example:</para>
+            <screen role="example">rabbitmqctl hipe_compile /tmp/rabbit-hipe/ebin</screen>
+            <para role="example">
+              HiPE-compiles modules and stores them to /tmp/rabbit-hipe/ebin directory.
+            </para>
+          </listitem>
+        </varlistentry>
       </variablelist>
     </refsect2>
 
           </listitem>
         </varlistentry>
 
+        <varlistentry>
+          <term><cmdsynopsis><command>node_health_check</command></cmdsynopsis></term>
+          <listitem>
+            <para>
+              Health check of the RabbitMQ node. Verifies the rabbit application is
+              running, list_queues and list_channels return, and alarms are not set.
+            </para>
+            <para role="example-prefix">For example:</para>
+            <screen role="example">rabbitmqctl node_health_check -n rabbit@stringer</screen>
+            <para role="example">
+              This command performs a health check on the RabbitMQ node.
+            </para>
+          </listitem>
+        </varlistentry>
+
         <varlistentry>
           <term><cmdsynopsis><command>environment</command></cmdsynopsis></term>
           <listitem>
similarity index 99%
rename from rabbitmq-server/scripts/set_rabbitmq_policy.sh
rename to rabbitmq-server/docs/set_rabbitmq_policy.sh.example
index a88b0c417a4d183efda0c684340a501a29ddea88..f46e901ad56bd4cd5eb57580d92af962e9c6e591 100644 (file)
@@ -2,4 +2,3 @@
 # cluster start up. It is a convenient place to set your cluster
 # policy here, for example:
 # ${OCF_RESKEY_ctl} set_policy ha-all "." '{"ha-mode":"all", "ha-sync-mode":"automatic"}' --apply-to all --priority 0
-
index fc2d806f2e82f11bffa3bc023b530169fb08ffb3..efbcf5cd11a59ef1425ead2dfa4b0514e62b437b 100644 (file)
@@ -16,7 +16,7 @@
 
 ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
 
-ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+ERLANG_MK_VERSION = 2.0.0-pre.2-76-g427cfb8
 
 # Core configuration.
 
@@ -84,7 +84,7 @@ all:: deps app rel
 rel::
        $(verbose) :
 
-check:: clean app tests
+check:: tests
 
 clean:: clean-crashdump
 
@@ -421,6 +421,14 @@ pkg_boss_db_fetch = git
 pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
 pkg_boss_db_commit = master
 
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
 PACKAGES += bson
 pkg_bson_name = bson
 pkg_bson_description = BSON documents in Erlang, see bsonspec.org
@@ -885,14 +893,6 @@ pkg_dh_date_fetch = git
 pkg_dh_date_repo = https://github.com/daleharvey/dh_date
 pkg_dh_date_commit = master
 
-PACKAGES += dhtcrawler
-pkg_dhtcrawler_name = dhtcrawler
-pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
-pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_fetch = git
-pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_commit = master
-
 PACKAGES += dirbusterl
 pkg_dirbusterl_name = dirbusterl
 pkg_dirbusterl_description = DirBuster successor in Erlang
@@ -1139,7 +1139,7 @@ pkg_elvis_description = Erlang Style Reviewer
 pkg_elvis_homepage = https://github.com/inaka/elvis
 pkg_elvis_fetch = git
 pkg_elvis_repo = https://github.com/inaka/elvis
-pkg_elvis_commit = 0.2.4
+pkg_elvis_commit = master
 
 PACKAGES += emagick
 pkg_emagick_name = emagick
@@ -1781,6 +1781,14 @@ pkg_geef_fetch = git
 pkg_geef_repo = https://github.com/carlosmn/geef
 pkg_geef_commit = master
 
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
 PACKAGES += gen_cycle
 pkg_gen_cycle_name = gen_cycle
 pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
@@ -1981,6 +1989,14 @@ pkg_hyper_fetch = git
 pkg_hyper_repo = https://github.com/GameAnalytics/hyper
 pkg_hyper_commit = master
 
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
 PACKAGES += ibrowse
 pkg_ibrowse_name = ibrowse
 pkg_ibrowse_description = Erlang HTTP client
@@ -2501,6 +2517,14 @@ pkg_merl_fetch = git
 pkg_merl_repo = https://github.com/richcarl/merl
 pkg_merl_commit = master
 
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
 PACKAGES += mimetypes
 pkg_mimetypes_name = mimetypes
 pkg_mimetypes_description = Erlang MIME types library
@@ -2733,14 +2757,6 @@ pkg_oauth2_fetch = git
 pkg_oauth2_repo = https://github.com/kivra/oauth2
 pkg_oauth2_commit = master
 
-PACKAGES += oauth2c
-pkg_oauth2c_name = oauth2c
-pkg_oauth2c_description = Erlang OAuth2 Client
-pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
-pkg_oauth2c_fetch = git
-pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
-pkg_oauth2c_commit = master
-
 PACKAGES += octopus
 pkg_octopus_name = octopus
 pkg_octopus_description = Small and flexible pool manager written in Erlang
@@ -3533,6 +3549,14 @@ pkg_stripe_fetch = git
 pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
 pkg_stripe_commit = v1
 
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
 PACKAGES += surrogate
 pkg_surrogate_name = surrogate
 pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
@@ -3907,7 +3931,7 @@ pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
 pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
 pkg_xref_runner_fetch = git
 pkg_xref_runner_repo = https://github.com/inaka/xref_runner
-pkg_xref_runner_commit = 0.2.0
+pkg_xref_runner_commit = 0.2.3
 
 PACKAGES += yamerl
 pkg_yamerl_name = yamerl
@@ -4092,7 +4116,10 @@ endif
 # While Makefile file could be GNUmakefile or makefile,
 # in practice only Makefile is needed so far.
 define dep_autopatch
-       if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+               $(call dep_autopatch_erlang_mk,$(1)); \
+       elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
                if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
                        $(call dep_autopatch2,$(1)); \
                elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
@@ -4100,12 +4127,7 @@ define dep_autopatch
                elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
                        $(call dep_autopatch2,$(1)); \
                else \
-                       if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
-                               $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-                               $(call dep_autopatch_erlang_mk,$(1)); \
-                       else \
-                               $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
-                       fi \
+                       $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
                fi \
        else \
                if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
@@ -4117,8 +4139,11 @@ define dep_autopatch
 endef
 
 define dep_autopatch2
+       if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+               $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+       fi; \
        $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
-       if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+       if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
                $(call dep_autopatch_fetch_rebar); \
                $(call dep_autopatch_rebar,$(1)); \
        else \
@@ -4256,57 +4281,6 @@ define dep_autopatch_rebar.erl
                                Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
                end
        end(),
-       FindFirst = fun(F, Fd) ->
-               case io:parse_erl_form(Fd, undefined) of
-                       {ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
-                               [PT, F(F, Fd)];
-                       {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
-                               case proplists:get_value(parse_transform, CompileOpts) of
-                                       undefined -> [F(F, Fd)];
-                                       PT -> [PT, F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, include, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ ->
-                                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
-                                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                                       _ -> [F(F, Fd)]
-                                               end
-                               end;
-                       {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
-                               {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
-                               [F(F, HrlFd), F(F, Fd)];
-                       {ok, {attribute, _, include_lib, Hrl}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
-                                       {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {ok, {attribute, _, import, {Imp, _}}, _} ->
-                               case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
-                                       {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
-                                       _ -> [F(F, Fd)]
-                               end;
-                       {eof, _} ->
-                               file:close(Fd),
-                               [];
-                       _ ->
-                               F(F, Fd)
-               end
-       end,
-       fun() ->
-               ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
-               First0 = lists:usort(lists:flatten([begin
-                       {ok, Fd} = file:open(F, [read]),
-                       FindFirst(FindFirst, Fd)
-               end || F <- ErlFiles])),
-               First = lists:flatten([begin
-                       {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
-                       FindFirst(FindFirst, Fd)
-               end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
-               Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
-                       lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
-       end(),
        Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
        Write("\npreprocess::\n"),
        Write("\npre-deps::\n"),
@@ -4419,9 +4393,10 @@ define dep_autopatch_rebar.erl
                                        Output, ": $$\(foreach ext,.c .C .cc .cpp,",
                                                "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
                                        "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
-                                       case filename:extension(Output) of
-                                               [] -> "\n";
-                                               _ -> " -shared\n"
+                                       case {filename:extension(Output), $(PLATFORM)} of
+                                           {[], _} -> "\n";
+                                           {_, darwin} -> "\n";
+                                           _ -> " -shared\n"
                                        end])
                        end,
                        [PortSpec(S) || S <- PortSpecs]
@@ -4490,6 +4465,15 @@ define dep_autopatch_app.erl
        halt()
 endef
 
+define dep_autopatch_appsrc_script.erl
+       AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+       AppSrcScript = AppSrc ++ ".script",
+       Bindings = erl_eval:new_bindings(),
+       {ok, Conf} = file:script(AppSrcScript, Bindings),
+       ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+       halt()
+endef
+
 define dep_autopatch_appsrc.erl
        AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
        AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
@@ -4576,10 +4560,11 @@ $(DEPS_DIR)/$(call dep_name,$1):
                exit 17; \
        fi
        $(verbose) mkdir -p $(DEPS_DIR)
-       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
-       $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
-               echo " AUTO  " $(DEP_STR); \
-               cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+       $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+       $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+                       && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+               echo " AUTO  " $(1); \
+               cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
        fi
        - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
                echo " CONF  " $(DEP_STR); \
@@ -4672,28 +4657,10 @@ dtl_verbose = $(dtl_verbose_$(V))
 
 # Core targets.
 
-define erlydtl_compile.erl
-       [begin
-               Module0 = case "$(strip $(DTL_FULL_PATH))" of
-                       "" ->
-                               filename:basename(F, ".dtl");
-                       _ ->
-                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
-                               re:replace(F2, "/",  "_",  [{return, list}, global])
-               end,
-               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
-               case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
-                       ok -> ok;
-                       {ok, _} -> ok
-               end
-       end || F <- string:tokens("$(1)", " ")],
-       halt().
-endef
-
-ifneq ($(wildcard src/),)
-
 DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
 
+ifneq ($(DTL_FILES),)
+
 ifdef DTL_FULL_PATH
 BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
 else
@@ -4701,7 +4668,7 @@ BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES
 endif
 
 ifneq ($(words $(DTL_FILES)),0)
-# Rebuild everything when the Makefile changes.
+# Rebuild templates when the Makefile changes.
 $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
        @mkdir -p $(ERLANG_MK_TMP)
        @if test -f $@; then \
@@ -4712,9 +4679,28 @@ $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
 ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
 endif
 
-ebin/$(PROJECT).app:: $(DTL_FILES)
+define erlydtl_compile.erl
+       [begin
+               Module0 = case "$(strip $(DTL_FULL_PATH))" of
+                       "" ->
+                               filename:basename(F, ".dtl");
+                       _ ->
+                               "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+                               re:replace(F2, "/",  "_",  [{return, list}, global])
+               end,
+               Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+               case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+                       ok -> ok;
+                       {ok, _} -> ok
+               end
+       end || F <- string:tokens("$(1)", " ")],
+       halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
        $(if $(strip $?),\
-               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+               $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?),-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/))
+
 endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
@@ -4888,51 +4874,79 @@ $(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
 # Erlang and Core Erlang files.
 
 define makedep.erl
+       E = ets:new(makedep, [bag]),
+       G = digraph:new([acyclic]),
        ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
-       Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
-       Add = fun (Dep, Acc) ->
-               case lists:keyfind(atom_to_list(Dep), 1, Modules) of
-                       {_, DepFile} -> [DepFile|Acc];
-                       false -> Acc
+       Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+       Add = fun (Mod, Dep) ->
+               case lists:keyfind(Dep, 1, Modules) of
+                       false -> ok;
+                       {_, DepFile} ->
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile}),
+                               digraph:add_vertex(G, Mod),
+                               digraph:add_vertex(G, Dep),
+                               digraph:add_edge(G, Mod, Dep)
                end
        end,
-       AddHd = fun (Dep, Acc) ->
-               case {Dep, lists:keymember(Dep, 2, Modules)} of
-                       {"src/" ++ _, false} -> [Dep|Acc];
-                       {"include/" ++ _, false} -> [Dep|Acc];
-                       _ -> Acc
+       AddHd = fun (F, Mod, DepFile) ->
+               case file:open(DepFile, [read]) of
+                       {error, enoent} -> ok;
+                       {ok, Fd} ->
+                               F(F, Fd, Mod),
+                               {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+                               ets:insert(E, {ModFile, DepFile})
                end
        end,
-       CompileFirst = fun (Deps) ->
-               First0 = [case filename:extension(D) of
-                       ".erl" -> filename:basename(D, ".erl");
-                       _ -> []
-               end || D <- Deps],
-               case lists:usort(First0) of
-                       [] -> [];
-                       [[]] -> [];
-                       First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
-               end
+       Attr = fun
+               (F, Mod, behavior, Dep) -> Add(Mod, Dep);
+               (F, Mod, behaviour, Dep) -> Add(Mod, Dep);
+               (F, Mod, compile, {parse_transform, Dep}) -> Add(Mod, Dep);
+               (F, Mod, compile, Opts) when is_list(Opts) ->
+                       case proplists:get_value(parse_transform, Opts) of
+                               undefined -> ok;
+                               Dep -> Add(Mod, Dep)
+                       end;
+               (F, Mod, include, Hrl) ->
+                       case filelib:is_file("include/" ++ Hrl) of
+                               true -> AddHd(F, Mod, "include/" ++ Hrl);
+                               false ->
+                                       case filelib:is_file("src/" ++ Hrl) of
+                                               true -> AddHd(F, Mod, "src/" ++ Hrl);
+                                               false -> false
+                                       end
+                       end;
+               (F, Mod, include_lib, "$1/include/" ++ Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, include_lib, Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+               (F, Mod, import, {Imp, _}) ->
+                       case filelib:is_file("src/" ++ atom_to_list(Imp) ++ ".erl") of
+                               false -> ok;
+                               true -> Add(Mod, Imp)
+                       end;
+               (_, _, _, _) -> ok
        end,
-       Depend = [begin
-               case epp:parse_file(F, ["include/"], []) of
-                       {ok, Forms} ->
-                               Deps = lists:usort(lists:foldl(fun
-                                       ({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
-                                       ({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
-                                       (_, Acc) -> Acc
-                               end, [], Forms)),
-                               case Deps of
-                                       [] -> "";
-                                       _ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
-                               end;
-                       {error, enoent} ->
-                               []
+       MakeDepend = fun(F, Fd, Mod) ->
+               case io:parse_erl_form(Fd, undefined) of
+                       {ok, {attribute, _, Key, Value}, _} ->
+                               Attr(F, Mod, Key, Value),
+                               F(F, Fd, Mod);
+                       {eof, _} ->
+                               file:close(Fd);
+                       _ ->
+                               F(F, Fd, Mod)
                end
+       end,
+       [begin
+               Mod = list_to_atom(filename:basename(F, ".erl")),
+               {ok, Fd} = file:open(F, [read]),
+               MakeDepend(MakeDepend, Fd, Mod)
        end || F <- ErlFiles],
-       ok = file:write_file("$(1)", Depend),
+       Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+       CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+       ok = file:write_file("$(1)", [
+               [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+               "\nCOMPILE_FIRST +=", [[" ", atom_to_list(CF)] || CF <- CompileFirst], "\n"
+       ]),
        halt()
 endef
 
@@ -5069,6 +5083,11 @@ test-dir:
                $(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
 endif
 
+ifeq ($(wildcard src),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps
+       $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+else
 ifeq ($(wildcard ebin/test),)
 test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
 test-build:: clean deps test-deps $(PROJECT).d
@@ -5086,6 +5105,7 @@ clean-test-dir:
 ifneq ($(wildcard $(TEST_DIR)/*.beam),)
        $(gen_verbose) rm -f $(TEST_DIR)/*.beam
 endif
+endif
 
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5103,11 +5123,14 @@ $(if $(filter-out -Werror,$1),\
                $(shell echo $1 | cut -b 2-)))
 endef
 
+define compat_erlc_opts_to_list
+       [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
 define compat_rebar_config
 {deps, [$(call comma_list,$(foreach d,$(DEPS),\
        {$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
-{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
-       $(call compat_convert_erlc_opts,$o)))]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
 endef
 
 $(eval _compat_rebar_config = $$(compat_rebar_config))
@@ -5126,12 +5149,12 @@ MAN_SECTIONS ?= 3 7
 
 docs:: asciidoc
 
-asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+asciidoc: asciidoc-guide asciidoc-manual
 
 ifeq ($(wildcard doc/src/guide/book.asciidoc),)
 asciidoc-guide:
 else
-asciidoc-guide:
+asciidoc-guide: distclean-asciidoc doc-deps
        a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
        a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
 endif
@@ -5139,7 +5162,7 @@ endif
 ifeq ($(wildcard doc/src/manual/*.asciidoc),)
 asciidoc-manual:
 else
-asciidoc-manual:
+asciidoc-manual: distclean-asciidoc doc-deps
        for f in doc/src/manual/*.asciidoc ; do \
                a2x -v -f manpage $$f ; \
        done
@@ -5154,7 +5177,7 @@ install-docs:: install-asciidoc
 install-asciidoc: asciidoc-manual
        for s in $(MAN_SECTIONS); do \
                mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
-               install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+               install -g `id -u` -o `id -g` -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
        done
 endif
 
@@ -5214,6 +5237,8 @@ define bs_appsrc_lib
 ]}.
 endef
 
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
 ifdef SP
 define bs_Makefile
 PROJECT = $p
@@ -5223,17 +5248,21 @@ PROJECT_VERSION = 0.0.1
 # Whitespace to be used when creating files from templates.
 SP = $(SP)
 
-include erlang.mk
 endef
 else
 define bs_Makefile
 PROJECT = $p
-include erlang.mk
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 endef
 endif
 
 define bs_apps_Makefile
 PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
 include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
 endef
 
@@ -5527,6 +5556,7 @@ endif
        $(eval p := $(PROJECT))
        $(eval n := $(PROJECT)_sup)
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc,src/$(PROJECT).app.src)
@@ -5540,6 +5570,7 @@ ifneq ($(wildcard src/),)
 endif
        $(eval p := $(PROJECT))
        $(call render_template,bs_Makefile,Makefile)
+       $(verbose) echo "include erlang.mk" >> Makefile
        $(verbose) mkdir src/
 ifdef LEGACY
        $(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
@@ -5620,12 +5651,32 @@ list-templates:
 
 C_SRC_DIR ?= $(CURDIR)/c_src
 C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
-C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
 C_SRC_TYPE ?= shared
 
 # System type and C compiler/flags.
 
-ifeq ($(PLATFORM),darwin)
+ifeq ($(PLATFORM),msys2)
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+       C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+       C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+       C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+       CC = /mingw64/bin/gcc
+       CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+       CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
        CC ?= cc
        CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
        CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
@@ -5640,10 +5691,15 @@ else ifeq ($(PLATFORM),linux)
        CXXFLAGS ?= -O3 -finline-functions -Wall
 endif
 
-CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
-CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+ifneq ($(PLATFORM),msys2)
+       CFLAGS += -fPIC
+       CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
 
-LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lerl_interface -lei
 
 # Verbosity.
 
@@ -5680,15 +5736,15 @@ OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
 COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
 COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
 
-app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
 
-$(C_SRC_OUTPUT): $(OBJECTS)
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
        $(verbose) mkdir -p priv/
        $(link_verbose) $(CC) $(OBJECTS) \
                $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
-               -o $(C_SRC_OUTPUT)
+               -o $(C_SRC_OUTPUT_FILE)
 
 %.o: %.c
        $(COMPILE_C) $(OUTPUT_OPTION) $<
@@ -5705,13 +5761,13 @@ $(C_SRC_OUTPUT): $(OBJECTS)
 clean:: clean-c_src
 
 clean-c_src:
-       $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+       $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
 
 endif
 
 ifneq ($(wildcard $(C_SRC_DIR)),)
 $(C_SRC_ENV):
-       $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+       $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
                io_lib:format( \
                        \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
                        \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
@@ -5889,7 +5945,7 @@ endif
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
-.PHONY: ct distclean-ct
+.PHONY: ct apps-ct distclean-ct
 
 # Configuration.
 
@@ -5924,17 +5980,33 @@ CT_RUN = ct_run \
        -logdir $(CURDIR)/logs
 
 ifeq ($(CT_SUITES),)
-ct:
+ct: $(if $(IS_APP),,apps-ct)
 else
-ct: test-build
+ct: test-build $(if $(IS_APP),,apps-ct)
        $(verbose) mkdir -p $(CURDIR)/logs/
        $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
 endif
 
+ifneq ($(ALL_APPS_DIRS),)
+apps-ct:
+       $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app ct IS_APP=1; done
+endif
+
+ifndef t
+CT_EXTRA =
+else
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+endif
+
 define ct_suite_target
 ct-$(1): test-build
        $(verbose) mkdir -p $(CURDIR)/logs/
-       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS)
+       $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
 endef
 
 $(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
@@ -5953,9 +6025,8 @@ DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
 export DIALYZER_PLT
 
 PLT_APPS ?=
-DIALYZER_DIRS ?= --src -r src
-DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
-       -Wunmatched_returns # -Wunderspecs
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
 
 # Core targets.
 
@@ -5971,6 +6042,18 @@ help::
 
 # Plugin-specific targets.
 
+define filter_opts.erl
+       Opts = binary:split(<<"$1">>, <<"-">>, [global]),
+       Filtered = lists:reverse(lists:foldl(fun
+               (O = <<"pa ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"D ", _/bits>>, Acc) -> [O|Acc];
+               (O = <<"I ", _/bits>>, Acc) -> [O|Acc];
+               (_, Acc) -> Acc
+       end, [], Opts)),
+       io:format("~s~n", [[["-", O] || O <- Filtered]]),
+       halt().
+endef
+
 $(DIALYZER_PLT): deps app
        $(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
 
@@ -5984,47 +6067,32 @@ dialyze:
 else
 dialyze: $(DIALYZER_PLT)
 endif
-       $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+       $(verbose) dialyzer --no_native `$(call erlang,$(call filter_opts.erl,$(ERLC_OPTS)))` $(DIALYZER_DIRS) $(DIALYZER_OPTS)
 
-# Copyright (c) 2015, Erlang Solutions Ltd.
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
 
-.PHONY: elvis distclean-elvis
+.PHONY: distclean-edoc edoc
 
 # Configuration.
 
-ELVIS_CONFIG ?= $(CURDIR)/elvis.config
-
-ELVIS ?= $(CURDIR)/elvis
-export ELVIS
-
-ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
-ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
-ELVIS_OPTS ?=
+EDOC_OPTS ?=
 
 # Core targets.
 
-help::
-       $(verbose) printf "%s\n" "" \
-               "Elvis targets:" \
-               "  elvis       Run Elvis using the local elvis.config or download the default otherwise"
+ifneq ($(wildcard doc/overview.edoc),)
+docs:: edoc
+endif
 
-distclean:: distclean-elvis
+distclean:: distclean-edoc
 
 # Plugin-specific targets.
 
-$(ELVIS):
-       $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
-       $(verbose) chmod +x $(ELVIS)
-
-$(ELVIS_CONFIG):
-       $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
-
-elvis: $(ELVIS) $(ELVIS_CONFIG)
-       $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
+edoc: distclean-edoc doc-deps
+       $(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().'
 
-distclean-elvis:
-       $(gen_verbose) rm -rf $(ELVIS)
+distclean-edoc:
+       $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info
 
 # Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -6095,11 +6163,12 @@ distclean-escript:
 # Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is contributed to erlang.mk and subject to the terms of the ISC License.
 
-.PHONY: eunit
+.PHONY: eunit apps-eunit
 
 # Configuration
 
 EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
 
 # Core targets.
 
@@ -6121,7 +6190,7 @@ define eunit.erl
                                _ -> ok
                        end
        end,
-       case eunit:test([$(call comma_list,$(1))], [$(EUNIT_OPTS)]) of
+       case eunit:test($1, [$(EUNIT_OPTS)]) of
                ok -> ok;
                error -> halt(2)
        end,
@@ -6133,14 +6202,30 @@ define eunit.erl
        halt()
 endef
 
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build
+       $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
 EUNIT_EBIN_MODS = $(notdir $(basename $(call core_find,ebin/,*.beam)))
 EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.beam)))
 EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
-       $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),{module,'$(mod)'})
+       $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
 
-eunit: test-build
-       $(gen_verbose) $(ERL) -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin ebin \
-               -eval "$(subst $(newline),,$(subst ",\",$(call eunit.erl,$(EUNIT_MODS))))"
+eunit: test-build $(if $(IS_APP),,apps-eunit)
+       $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit:
+       $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; done
+endif
+endif
 
 # Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
 # This file is part of erlang.mk and subject to the terms of the ISC License.
index 3b59b717f7a90b0799aaee3679d9156b5f6a913d..58400b3bf4b8268db0e886710189b26ec90ac975 100644 (file)
@@ -1,30 +1,33 @@
-rabbit a99d5939c8557384760db3783b59385af7d3db9a stable
-amqp_client 208bc6a37972afd6d21cd3355f80723efe86417c stable
+rabbit ea4e59ee3018bd2824b003ac8f9db3e59c9d3413 rabbitmq_v3_6_5_milestone1
+amqp_client de6e3378391e901bdf1448ded05d6dfc1cfd8a3b rabbitmq_v3_6_4
 cowboy b8e4115eb13488c517d8d8ef33c47d0eaa7838c6 1.0.3
 cowlib 7d8a571b1e50602d701ca203fbf28036b2cf80f5 1.0.1
-mochiweb d024b4a5804fe4e0061c4ed2d1c52bdd168995e9 v2.13.0
-rabbit_common da7529cbee789f36835162220697e55c1a6f5dbb stable
-rabbitmq_amqp1_0 0bec8f2ddae21e1a2b3b00b4ff8fa8fa1dc07ebe rabbitmq_v3_6_1_rc1
-rabbitmq_auth_backend_ldap a864e6dc4bca7e8cdb18482cb00565ee917dd6a9 rabbitmq_v3_6_1_rc1
-rabbitmq_auth_mechanism_ssl b2f9f009af90ddca32a131f2cdfab5d77c7826b8 rabbitmq_v3_6_1_rc1
-rabbitmq_codegen 4c4992c458f74d1c2b6398419c0f6e724cb823e1 rabbitmq_v3_6_1_rc1
-rabbitmq_consistent_hash_exchange d30068db8b87d894d0b0a15d413e9584985f7aa6 rabbitmq_v3_6_1_rc1
-rabbitmq_event_exchange 03a8efb1cd53b32d25156f0f60e6b52f6d273df5 stable
-rabbitmq_federation 3040e494b4fd9201111ee25dc3fbb723a54d885d rabbitmq_v3_6_1_rc1
-rabbitmq_federation_management 8fbc8483b816c1518378c84d9391ccf44ff44caf rabbitmq_v3_6_1_rc2
-rabbitmq_management 34f817d58b82d07f2b64e254ca2f2bd57443aebe rabbitmq_v3_6_1_rc2
-rabbitmq_management_agent 749e57f5b8aaa5320b4ea56657890ace4d636eea rabbitmq_v3_6_1_rc1
-rabbitmq_management_visualiser 70b61685aac2455c4004366a1da2f8896aa7673a rabbitmq_v3_6_1_rc1
-rabbitmq_mqtt e4d29d12eaeadd424640f0d6db1d8fa804f78168 rabbitmq_v3_6_1_rc1
-rabbitmq_recent_history_exchange 9b8068d8cb3a336ad11a349f3916b08b5dcce6db rabbitmq_v3_6_1_rc1
-rabbitmq_sharding 79c0759630af4b9c6bc06f405db7304b70636526 rabbitmq_v3_6_1_rc1
-rabbitmq_shovel 0cfdb35707fd4b011fc0d75006ecc08a71098958 rabbitmq_v3_6_1_rc1
-rabbitmq_shovel_management 0f1e67d9c9ca62a3cfdbf118d5499fe66507bcd6 rabbitmq_v3_6_1_rc1
-rabbitmq_stomp 2e70958ba55852e981c588396d5719c3756a10f0 stable
-rabbitmq_tracing edd796bfdb3482502c7a1afc7dbd1db572d9c984 rabbitmq_v3_6_1_rc1
-rabbitmq_web_dispatch 3bbb422d74c705419b9169429e9294743d83da25 rabbitmq_v3_6_1_rc1
-rabbitmq_web_stomp 928da18104446e6ea4af6825bb8c597651ff8bfa rabbitmq_v3_6_1_rc1
-rabbitmq_web_stomp_examples 7f884a9e3edd44e5fa564e329482786fe0aaebc9 rabbitmq_v3_6_1_rc1
+mochiweb a1ed381e1c4f56c7d1eaee2c2cb725719905a84a master
+rabbit_common 301f3d2e600c6eed1f74cd36d99f32d7782a0630 rabbitmq_v3_6_5_milestone2
+rabbitmq_amqp1_0 4f23a99a9f28f997f34b8f8a161c3d29da4b47b6 rabbitmq_v3_6_4
+rabbitmq_auth_backend_ldap deb2fa94b65169f2c92171dc6ae9f08801e74769 rabbitmq_v3_6_4
+rabbitmq_auth_mechanism_ssl 9fcd68bb9bd1a11759cdec781328a3169a005338 rabbitmq_v3_6_4
+rabbitmq_codegen 4e725d8cafeaca969082beb0b5fa7d48f7f738fe stable
+rabbitmq_consistent_hash_exchange 9926fddd461aca3fa3d97070712f8f6855cc69cd rabbitmq_v3_6_4
+rabbitmq_event_exchange b9ce9662904917756c23fef4883604637dacf77e rabbitmq_v3_6_4
+rabbitmq_federation d613a738604f6274e96eef26b6cba72e3ae63604 rabbitmq_v3_6_4
+rabbitmq_federation_management 4ef2eac742105367600f85e93cc25016c4a272bb rabbitmq_v3_6_4
+rabbitmq_jms_topic_exchange 82919d6815713445c07dc5faf21b8ed56b7c9143 rabbitmq_v3_6_4
+rabbitmq_management 182787783bff4868a63f6a73573a8ead56421cb2 rabbitmq_v3_6_5_milestone2
+rabbitmq_management_agent 7d7b85d188353f6af87b85ec831f129811c3f983 rabbitmq_v3_6_4
+rabbitmq_management_visualiser d135a528cc8ecc2c1101cd9dc331b1af61eb0ee5 rabbitmq_v3_6_4
+rabbitmq_mqtt 601f6d94ff48b9eda46e4e0ddb45f01a879c613d rabbitmq_v3_6_4
+rabbitmq_recent_history_exchange 7d01b5b03fb1a60d314544f8d9f33994a0803196 rabbitmq_v3_6_4
+rabbitmq_sharding 890c8530700a2812a54acd8893083e9a74226949 rabbitmq_v3_6_4
+rabbitmq_shovel e4b1dc712cd74a9d3b36aca84a984114bdaef724 rabbitmq_v3_6_4
+rabbitmq_shovel_management c83f97a8169608dcaff5538f915cf86b518fb718 rabbitmq_v3_6_4
+rabbitmq_stomp d1117a87e06d8066a5b87cee2c41fd86af19b1cb rabbitmq_v3_6_4
+rabbitmq_top 9ceb7d022e4f75325b050da72bd64d7fba6316ab rabbitmq_v3_6_4
+rabbitmq_tracing 6263db57dac49d673b8d87dcee2799d5363ca197 rabbitmq_v3_6_4
+rabbitmq_trust_store 18334b25081cb59dce2725b667a4fb59926b3b0b rabbitmq_v3_6_4
+rabbitmq_web_dispatch 8415fdd5437ef9efc2f26f5b1dd0437b90c2daba rabbitmq_v3_6_4
+rabbitmq_web_stomp 71b8c97775c04e77eb28a8e4a0c942b9165851e5 rabbitmq_v3_6_4
+rabbitmq_web_stomp_examples f271d08a4090ab12ee6b23de7840e07ebc52a5cd rabbitmq_v3_6_4
 ranch a5d2efcde9a34ad38ab89a26d98ea5335e88625a 1.2.1
-sockjs 7776c2a9d882306b01442b4136e226ef3509436a master
+sockjs 7e7112a4935a9aaa89e97954eb612534fa0f6229 master
 webmachine 6b5210c0ed07159f43222255e05a90bbef6c8cbe 
index bc20b4415d11a04c6c8a9c1d92df790f530ccf57..d03f9938e5652573bc72f021d820c2ff2cc5fa0e 100644 (file)
 %% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
 %%
 
--ifdef(use_specs).
+-type callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}.
+-type args() :: any().
+-type members() :: [pid()].
 
--type(callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}).
--type(args() :: any()).
--type(members() :: [pid()]).
-
--spec(joined/2           :: (args(), members())    -> callback_result()).
--spec(members_changed/3  :: (args(), members(),members()) -> callback_result()).
--spec(handle_msg/3       :: (args(), pid(), any()) -> callback_result()).
--spec(handle_terminate/2 :: (args(), term())       -> any()).
-
--endif.
+-spec joined(args(), members())                    -> callback_result().
+-spec members_changed(args(), members(),members()) -> callback_result().
+-spec handle_msg(args(), pid(), any())             -> callback_result().
+-spec handle_terminate(args(), term())             -> any().
index 2e687e2eb8bb93df4ded27739f30da910c3436d8..a0d1ecfdd5192873463c081bd8252a8c98dd40a6 100644 (file)
@@ -34,7 +34,7 @@
 -define(NODE_DEF(Node), {?NODE_OPT, {option, Node}}).
 -define(QUIET_DEF, {?QUIET_OPT, flag}).
 -define(VHOST_DEF, {?VHOST_OPT, {option, "/"}}).
--define(TIMEOUT_DEF, {?TIMEOUT_OPT, {option, "infinity"}}).
+-define(TIMEOUT_DEF, {?TIMEOUT_OPT, {option, use_default}}).
 
 -define(VERBOSE_DEF, {?VERBOSE_OPT, flag}).
 -define(MINIMAL_DEF, {?MINIMAL_OPT, flag}).
@@ -47,8 +47,6 @@
 -define(OFFLINE_DEF, {?OFFLINE_OPT, flag}).
 -define(ONLINE_DEF, {?ONLINE_OPT, flag}).
 
--define(RPC_TIMEOUT, infinity).
-
 %% Subset of standartized exit codes from sysexits.h, see
 %% https://github.com/rabbitmq/rabbitmq-server/issues/396 for discussion.
 -define(EX_OK         ,  0).
index eed26fdac8806687e5ba500e9d9602a49e469dfa..eb9e9e3e030aa3ca626eeabf028b8abe1601733d 100644 (file)
@@ -44,6 +44,8 @@ dep_rabbitmq_event_exchange           = git_rmq rabbitmq-event-exchange $(curren
 dep_rabbitmq_federation               = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_federation_management    = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_java_client              = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client               = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange       = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_lvc                      = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management               = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_management_agent         = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
@@ -53,7 +55,9 @@ dep_rabbitmq_management_visualiser    = git_rmq rabbitmq-management-visualiser $
 dep_rabbitmq_message_timestamp        = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_metronome                = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_mqtt                     = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client              = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_recent_history_exchange  = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp       = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_rtopic_exchange          = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_sharding                 = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_shovel                   = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -62,10 +66,13 @@ dep_rabbitmq_stomp                    = git_rmq rabbitmq-stomp $(current_rmq_ref
 dep_rabbitmq_toke                     = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_top                      = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_tracing                  = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store              = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_test                     = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_dispatch             = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp                = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_web_stomp_examples       = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt                 = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples        = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
 dep_rabbitmq_website                  = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
 dep_sockjs                            = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
 dep_toke                              = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
@@ -97,6 +104,8 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_federation \
                      rabbitmq_federation_management \
                      rabbitmq_java_client \
+                     rabbitmq_jms_client \
+                     rabbitmq_jms_topic_exchange \
                      rabbitmq_lvc \
                      rabbitmq_management \
                      rabbitmq_management_agent \
@@ -106,7 +115,9 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_message_timestamp \
                      rabbitmq_metronome \
                      rabbitmq_mqtt \
+                     rabbitmq_objc_client \
                      rabbitmq_recent_history_exchange \
+                     rabbitmq_routing_node_stamp \
                      rabbitmq_rtopic_exchange \
                      rabbitmq_sharding \
                      rabbitmq_shovel \
@@ -116,7 +127,10 @@ RABBITMQ_COMPONENTS = amqp_client \
                      rabbitmq_toke \
                      rabbitmq_top \
                      rabbitmq_tracing \
+                     rabbitmq_trust_store \
                      rabbitmq_web_dispatch \
+                     rabbitmq_web_mqtt \
+                     rabbitmq_web_mqtt_examples \
                      rabbitmq_web_stomp \
                      rabbitmq_web_stomp_examples \
                      rabbitmq_website
old mode 100644 (file)
new mode 100755 (executable)
index c5d8782..baffce8
@@ -40,6 +40,5 @@ MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbitmq/mnesia
 ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins
 
 PLUGINS_DIR="${RABBITMQ_HOME}/plugins"
-IO_THREAD_POOL_SIZE=64
 
 CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
index 27edd0d11eab96d50706fbe1728ea1bb47216611..8fff5ea827a7dcfcd3d9d88420eabd3ef2613b55 100644 (file)
@@ -46,6 +46,4 @@ REM PLUGINS_DIR="${RABBITMQ_HOME}/plugins"
 for /f "delims=" %%F in ("!TDP0!..\plugins") do set PLUGINS_DIR=%%~dpsF%%~nF%%~xF\r
 \r
 REM CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf\r
-if "!RABBITMQ_CONF_ENV_FILE!"=="" (\r
-    set RABBITMQ_CONF_ENV_FILE=!RABBITMQ_BASE!\rabbitmq-env-conf.bat\r
-)\r
+set CONF_ENV_FILE=!RABBITMQ_BASE!\rabbitmq-env-conf.bat\r
old mode 100644 (file)
new mode 100755 (executable)
index dffed03..0fa164c
@@ -62,15 +62,11 @@ RABBITMQ_HOME="$(rmq_realpath "${RABBITMQ_SCRIPTS_DIR}/..")"
 ## Set defaults
 . ${RABBITMQ_SCRIPTS_DIR}/rabbitmq-defaults
 
-## Common defaults
-SERVER_ERL_ARGS="+P 1048576"
+DEFAULT_SCHEDULER_BIND_TYPE="db"
+[ "x" = "x$RABBITMQ_SCHEDULER_BIND_TYPE" ] && RABBITMQ_SCHEDULER_BIND_TYPE=${DEFAULT_SCHEDULER_BIND_TYPE}
 
-# warn about old rabbitmq.conf file, if no new one
-if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
-   [ ! -f ${CONF_ENV_FILE} ] ; then
-    echo -n "WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- "
-    echo "location has moved to ${CONF_ENV_FILE}"
-fi
+## Common defaults
+SERVER_ERL_ARGS="+P 1048576 +t 5000000 +stbt $RABBITMQ_SCHEDULER_BIND_TYPE "
 
 # We save the current value of $RABBITMQ_PID_FILE in case it was set by
 # an init script. If $CONF_ENV_FILE overrides it again, we must ignore
@@ -78,7 +74,9 @@ fi
 saved_RABBITMQ_PID_FILE=$RABBITMQ_PID_FILE
 
 ## Get configuration variables from the configure environment file
-[ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE} || true
+[ "x" = "x$RABBITMQ_CONF_ENV_FILE" ] && RABBITMQ_CONF_ENV_FILE=${CONF_ENV_FILE}
+
+[ -f ${RABBITMQ_CONF_ENV_FILE} ] && . ${RABBITMQ_CONF_ENV_FILE} || true
 
 if [ "$saved_RABBITMQ_PID_FILE" -a \
      "$saved_RABBITMQ_PID_FILE" != "$RABBITMQ_PID_FILE" ]; then
@@ -182,6 +180,7 @@ DEFAULT_NODE_PORT=5672
 [ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE}
 [ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
 [ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS}
+[ "x" = "x$RABBITMQ_SERVER_CODE_PATH" ] && RABBITMQ_SERVER_CODE_PATH=${SERVER_CODE_PATH}
 [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR}
 [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
 
index d5df9ddbd603347947f2e12001a1411f9b17ffcb..5f6ae970e93c72284ef8c8c4532a2d8321252122 100644 (file)
@@ -30,10 +30,14 @@ REM ## Set defaults
 REM . ${SCRIPT_DIR}/rabbitmq-defaults\r
 call "%SCRIPT_DIR%\rabbitmq-defaults.bat"\r
 \r
-REM These common defaults aren't referenced in the batch scripts\r
-REM ## Common defaults\r
-REM SERVER_ERL_ARGS="+P 1048576"\r
-REM\r
+set DEFAULT_SCHEDULER_BIND_TYPE=db\r
+\r
+REM [ "x" = "x$RABBITMQ_SCHEDULER_BIND_TYPE" ] && RABBITMQ_SCHEDULER_BIND_TYPE=${DEFAULT_SCHEDULER_BIND_TYPE}\r
+REM set the default scheduling bind type\r
+if "!RABBITMQ_SCHEDULER_BIND_TYPE!"=="" (\r
+    set RABBITMQ_SCHEDULER_BIND_TYPE=!DEFAULT_SCHEDULER_BIND_TYPE!\r
+)\r
+\r
 REM # warn about old rabbitmq.conf file, if no new one\r
 REM if [ -f /etc/rabbitmq/rabbitmq.conf ] && \\r
 REM    [ ! -f ${CONF_ENV_FILE} ] ; then\r
@@ -41,12 +45,15 @@ REM     echo -n "WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- "
 REM     echo "location has moved to ${CONF_ENV_FILE}"\r
 REM fi\r
 \r
-REM ERL_ARGS aren't referenced in the batch scripts\r
 REM Common defaults\r
-REM set SERVER_ERL_ARGS=+P 1048576\r
+set SERVER_ERL_ARGS=+P 1048576 +t 5000000 +stbt !RABBITMQ_SCHEDULER_BIND_TYPE! \r
 \r
 REM ## Get configuration variables from the configure environment file\r
 REM [ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE} || true\r
+if "!RABBITMQ_CONF_ENV_FILE!"=="" (\r
+    set RABBITMQ_CONF_ENV_FILE=!CONF_ENV_FILE!\r
+)\r
+\r
 if exist "!RABBITMQ_CONF_ENV_FILE!" (\r
     call "!RABBITMQ_CONF_ENV_FILE!"\r
 )\r
@@ -147,7 +154,9 @@ if "!RABBITMQ_DIST_PORT!"=="" (
 )\r
 \r
 REM [ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}\r
-REM No Windows equivalent\r
+if "!RABBITMQ_SERVER_ERL_ARGS!"=="" (\r
+    set RABBITMQ_SERVER_ERL_ARGS=!SERVER_ERL_ARGS!\r
+)\r
 \r
 REM [ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE}\r
 if "!RABBITMQ_CONFIG_FILE!"=="" (\r
@@ -382,8 +391,8 @@ goto :filter_paths_done
 set paths=%1\r
 set paths=%paths:"=%\r
 for /f "tokens=1* delims=;" %%a in ("%paths%") do (\r
-    if not "%%a" == "" call :filter_path %%a\r
-    if not "%%b" == "" call :filter_paths %%b\r
+    if not "%%a" == "" call :filter_path "%%a"\r
+    if not "%%b" == "" call :filter_paths "%%b"\r
 )\r
 set paths=\r
 exit /b\r
index 548a085434ef759d51c4ba5acccb9da39791e115..74337311cd116e13fe248e89588f46c4db9db3fa 100755 (executable)
@@ -47,7 +47,7 @@ case "$(uname -s)" in
                    exit $EX_CANTCREAT
                fi
                if ! echo $$ > ${RABBITMQ_PID_FILE}; then
-                   # Bettern diagnostics - otherwise the only report in logs is about failed 'echo'
+                   # Better diagnostics - otherwise the only report in logs is about failed 'echo'
                    # command, but without any other details: neither what script has failed nor what
                    # file output was redirected to.
                    echo "Failed to write pid file: ${RABBITMQ_PID_FILE}"
@@ -58,8 +58,13 @@ esac
 
 RABBITMQ_EBIN_ROOT="${RABBITMQ_HOME}/ebin"
 
+[ "$NOTIFY_SOCKET" ] && RUNNING_UNDER_SYSTEMD=true
+
 set +e
 
+# NOTIFY_SOCKET is needed here to prevent epmd from impersonating the
+# success of our startup sequence to systemd.
+NOTIFY_SOCKET= \
 RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \
 RABBITMQ_DIST_PORT=$RABBITMQ_DIST_PORT \
     ${ERL_DIR}erl -pa "$RABBITMQ_EBIN_ROOT" \
@@ -112,10 +117,31 @@ fi
 # there is no other way of preventing their expansion.
 set -f
 
+# Lazy initialization of threed pool size - if it wasn't set
+# explicitly. This parameter is only needed when server is starting,
+# so it makes no sense to do this calculations in rabbitmq-env or
+# rabbitmq-defaults scripts.
+ensure_thread_pool_size() {
+    if [ -z "${RABBITMQ_IO_THREAD_POOL_SIZE}" ]; then
+        RABBITMQ_IO_THREAD_POOL_SIZE=$(
+            ${ERL_DIR}erl -pa "$RABBITMQ_EBIN_ROOT" \
+                      -boot "${CLEAN_BOOT_FILE}" \
+                      -noinput \
+                      -s rabbit_misc report_default_thread_pool_size
+        )
+    fi
+}
+
 start_rabbitmq_server() {
+    # "-pa ${RABBITMQ_SERVER_CODE_PATH}" should be the very first
+    # command-line argument. In case of using cached HiPE-compilation,
+    # this will allow for compiled versions of erlang built-in modules
+    # (e.g. lists) to be loaded.
+    ensure_thread_pool_size
+    check_start_params &&
     RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \
     exec ${ERL_DIR}erl \
-        -pa ${RABBITMQ_EBIN_ROOT} \
+        -pa ${RABBITMQ_SERVER_CODE_PATH} ${RABBITMQ_EBIN_ROOT} \
         ${RABBITMQ_START_RABBIT} \
         ${RABBITMQ_NAME_TYPE} ${RABBITMQ_NODENAME} \
         -boot "${SASL_BOOT_FILE}" \
@@ -151,7 +177,39 @@ stop_rabbitmq_server() {
     fi
 }
 
-if [ 'x' = "x$RABBITMQ_ALLOW_INPUT" -a -z "$detached" ]; then
+check_start_params() {
+    check_not_empty RABBITMQ_BOOT_MODULE
+    check_not_empty RABBITMQ_NAME_TYPE
+    check_not_empty RABBITMQ_NODENAME
+    check_not_empty SASL_BOOT_FILE
+    check_not_empty RABBITMQ_IO_THREAD_POOL_SIZE
+}
+
+check_not_empty() {
+    local name="${1:?}"
+    local value
+    eval value=\$$name
+    if [ -z "$value" ]; then
+        echo "Error: ENV variable should be defined: $1.
+       Please check rabbitmq-env, rabbitmq-defaults, and ${RABBITMQ_CONF_ENV_FILE} script files"
+        exit 78
+    fi
+}
+
+if [ "$RABBITMQ_ALLOW_INPUT" -o "$RUNNING_UNDER_SYSTEMD" -o "$detached" ]; then
+    # Run erlang VM directly, completely replacing current shell
+    # process - so the pid file written in the code above will be
+    # valid (unless detached, which is also handled in the code
+    # above).
+    #
+    # This is also the correct mode to run the broker under
+    # systemd - there is no need for a proxy process that converts
+    # signals into a graceful shutdown command; the unit file should
+    # already contain instructions for graceful shutdown. Also, by
+    # removing this additional process we can simply use the value
+    # returned by `os:getpid/0` for the systemd ready notification.
+    start_rabbitmq_server "$@"
+else
     # When RabbitMQ runs in the foreground but the Erlang shell is
     # disabled, we setup signal handlers to stop RabbitMQ properly. This
     # is at least useful in the case of Docker.
@@ -160,7 +218,7 @@ if [ 'x' = "x$RABBITMQ_ALLOW_INPUT" -a -z "$detached" ]; then
     RABBITMQ_SERVER_START_ARGS="${RABBITMQ_SERVER_START_ARGS} +B i"
 
     # Signal handlers. They all stop RabbitMQ properly (using
-    # rabbitmqctl stop). Depending on the signal, this script will exwit
+    # rabbitmqctl stop). Depending on the signal, this script will exit
     # with a non-zero error code:
     #   SIGHUP SIGTERM SIGTSTP
     #     They are considered a normal process termination, so the script
@@ -176,6 +234,4 @@ if [ 'x' = "x$RABBITMQ_ALLOW_INPUT" -a -z "$detached" ]; then
     # Block until RabbitMQ exits or a signal is caught.
     # Waits for last command (which is start_rabbitmq_server)
     wait $!
-else
-    start_rabbitmq_server "$@"
 fi
index 16fba442904ecc22df2bc2aaebd85b27b5d91136..cd07d0c1b0d32f5c3f0cf30ba41407e3fa045693 100755 (executable)
@@ -13,8 +13,8 @@
 #
 # See usage() function below for more details ...
 #
-# Note that the script uses set_rabbitmq_policy.sh script located in the
-# same directory to setup RabbitMQ policies.
+# Note that the script uses an external file to set up RabbitMQ policies,
+# so make sure to create it from the example shipped with the package.
 #
 #######################################################################
 # Initialization:
@@ -46,6 +46,7 @@ OCF_RESKEY_erlang_cookie_file_default="/var/lib/rabbitmq/.erlang.cookie"
 OCF_RESKEY_use_fqdn_default=false
 OCF_RESKEY_fqdn_prefix_default=""
 OCF_RESKEY_max_rabbitmqctl_timeouts_default=3
+OCF_RESKEY_policy_file_default="/usr/local/sbin/set_rabbitmq_policy"
 
 : ${HA_LOGTAG="lrmd"}
 : ${HA_LOGFACILITY="daemon"}
@@ -66,6 +67,7 @@ OCF_RESKEY_max_rabbitmqctl_timeouts_default=3
 : ${OCF_RESKEY_use_fqdn=${OCF_RESKEY_use_fqdn_default}}
 : ${OCF_RESKEY_fqdn_prefix=${OCF_RESKEY_fqdn_prefix_default}}
 : ${OCF_RESKEY_max_rabbitmqctl_timeouts=${OCF_RESKEY_max_rabbitmqctl_timeouts_default}}
+: ${OCF_RESKEY_policy_file=${OCF_RESKEY_policy_file_default}}
 
 #######################################################################
 
@@ -288,6 +290,14 @@ If too many timeouts happen in a raw, the monitor call will return with error.
 <content type="string" default="${OCF_RESKEY_max_rabbitmqctl_timeouts_default}" />
 </parameter>
 
+<parameter name="policy_file" unique="0" required="0">
+<longdesc lang="en">
+A path to the shell script to setup RabbitMQ policies
+</longdesc>
+<shortdesc lang="en">A policy file path</shortdesc>
+<content type="string" default="${OCF_RESKEY_policy_file_default}" />
+</parameter>
+
 $EXTENDED_OCF_PARAMS
 
 </parameters>
@@ -613,7 +623,7 @@ rmq_setup_env() {
         fi
     done
 
-    export LL="${OCF_RESOURCE_INSTANCE}:"
+    export LL="${OCF_RESOURCE_INSTANCE}[$$]:"
     update_cookie
 }
 
@@ -668,8 +678,8 @@ reset_mnesia() {
     # remove mnesia files, if required
     if $make_amnesia ; then
         kill_rmq_and_remove_pid
-        ocf_run rm -rf "${MNESIA_FILES}/*"
-        ocf_log warn "${LH} Mnesia files appear corrupted and have been removed."
+        ocf_run rm -rf "${MNESIA_FILES}"
+        ocf_log warn "${LH} Mnesia files appear corrupted and have been removed from ${MNESIA_FILES}."
     fi
     # always return OCF SUCCESS
     return $OCF_SUCCESS
@@ -1286,6 +1296,7 @@ start_rmq_server_app() {
 get_status() {
     local what="${1:-kernel}"
     local rc=$OCF_NOT_RUNNING
+    local LH="${LL} get_status():"
     local body
     local beam_running
 
@@ -1296,11 +1307,11 @@ get_status() {
     beam_running=$?
     # report not running only if the which_applications() reported an error AND the beam is not running
     if [ $rc -ne 0 -a $beam_running -ne 0 ] ; then
-        ocf_log info "get_status() failed with code ${rc}. Command output: ${body}"
+        ocf_log info "${LH} failed with code ${rc}. Command output: ${body}"
         return $OCF_NOT_RUNNING
     # return a generic error, if there were errors and beam is found running
     elif [ $rc -ne 0 ] ; then
-        ocf_log info "get_status() found the beam process running but failed with code ${rc}. Command output: ${body}"
+        ocf_log info "${LH} found the beam process running but failed with code ${rc}. Command output: ${body}"
         return $OCF_ERR_GENERIC
     fi
 
@@ -1310,7 +1321,7 @@ get_status() {
         echo "$body" | grep "\{${what}," 2>&1 > /dev/null && rc=$OCF_SUCCESS
 
         if [ $rc -ne $OCF_SUCCESS ] ; then
-            ocf_log info "get_status(): app ${what} was not found in command output: ${body}"
+            ocf_log info "${LH} app ${what} was not found in command output: ${body}"
         fi
     fi
 
@@ -1452,6 +1463,7 @@ get_monitor() {
                 # Rabbit is running but is not connected to master
                 # Failing to avoid split brain
                 ocf_log err "${LH} rabbit node is running out of the cluster"
+                stop_server_process
                 rc=$OCF_ERR_GENERIC
             fi
         fi
@@ -1468,6 +1480,7 @@ get_monitor() {
 
             if [ -n "$master_name" ]; then
                 ocf_log info "${LH} master exists and rabbit app is not running. Exiting to be restarted by pacemaker"
+                stop_server_process
                 rc=$OCF_ERR_GENERIC
             fi
         fi
@@ -1578,6 +1591,10 @@ get_monitor() {
         fi
     fi
 
+    if ! is_cluster_status_ok ; then
+        rc=$OCF_ERR_GENERIC
+    fi
+
     # Check if the list of all queues is available,
     # Also report some queues stats and total virtual memory.
     local queues
@@ -1617,6 +1634,36 @@ get_monitor() {
     return $rc
 }
 
+ocf_update_private_attr() {
+    local attr_name="${1:?}"
+    local attr_value="${2:?}"
+    ocf_run attrd_updater -p --name "$attr_name" --update "$attr_value"
+}
+
+rabbitmqctl_with_timeout_check() {
+    local command="${1:?}"
+    local timeout_attr_name="${2:?}"
+
+    su_rabbit_cmd "${OCF_RESKEY_ctl} $command"
+    local rc=$?
+
+    check_timeouts $rc $timeout_attr_name "$command"
+    local has_timed_out=$?
+
+    case "$has_timed_out" in
+        0)
+            return $rc;;
+        1)
+            return 0;;
+        2)
+            return 1;;
+    esac
+}
+
+is_cluster_status_ok() {
+    local LH="${LH}: is_cluster_status_ok:"
+    rabbitmqctl_with_timeout_check cluster_status rabbit_cluster_status_timeouts > /dev/null 2>&1
+}
 
 action_monitor() {
     local rc=$OCF_ERR_GENERIC
@@ -1657,9 +1704,12 @@ action_start() {
         return $OCF_SUCCESS
     fi
 
-    ocf_run attrd_updater -p --name 'rabbit_list_channels_timeouts' --update '0'
-    ocf_run attrd_updater -p --name 'rabbit_get_alarms_timeouts' --update '0'
-    ocf_run attrd_updater -p --name 'rabbit_list_queues_timeouts' --update '0'
+    local attrs_to_zero="rabbit_list_channels_timeouts rabbit_get_alarms_timeouts rabbit_list_queues_timeouts rabbit_cluster_status_timeouts"
+    local attr_name_to_reset
+    for attr_name_to_reset in $attrs_to_zero; do
+        ocf_update_private_attr $attr_name_to_reset 0
+    done
+
     ocf_log info "${LH} Deleting start time attribute"
     ocf_run crm_attribute -N $THIS_PCMK_NODE -l reboot --name 'rabbit-start-time' --delete
     ocf_log info "${LH} Deleting master attribute"
@@ -2097,8 +2147,7 @@ action_promote() {
                         exit $OCF_FAILED_MASTER
                     fi
 
-                    local set_policy_path="$(dirname $0)/set_rabbitmq_policy.sh"
-                    [ -f $set_policy_path ] && . $set_policy_path
+                    [ -f "${OCF_RESKEY_policy_file}" ] && . "${OCF_RESKEY_policy_file}"
 
                     # create timestamp file
                     nowtime="$(now)"
index d80bb385d56d2ef7211cf065e7e0570bce598a4a..585a830efa2d5aa280faf8c03afb6a4b03a4319d 100644 (file)
@@ -98,7 +98,18 @@ if "!RABBITMQ_NODE_ONLY!"=="" (
 )\r
 \r
 if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" (\r
-    set RABBITMQ_IO_THREAD_POOL_ARG=30\r
+    set RABBITMQ_IO_THREAD_POOL_SIZE=64\r
+) \r
+\r
+\r
+set ENV_OK=true\r
+CALL :check_not_empty "RABBITMQ_BOOT_MODULE" !RABBITMQ_BOOT_MODULE! \r
+CALL :check_not_empty "RABBITMQ_NAME_TYPE" !RABBITMQ_NAME_TYPE!\r
+CALL :check_not_empty "RABBITMQ_NODENAME" !RABBITMQ_NODENAME!\r
+\r
+\r
+if "!ENV_OK!"=="false" (\r
+    EXIT /b 78\r
 )\r
 \r
 "!ERLANG_HOME!\bin\erl.exe" ^\r
@@ -109,9 +120,8 @@ if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" (
 !RABBITMQ_NAME_TYPE! !RABBITMQ_NODENAME! ^\r
 +W w ^\r
 +A "!RABBITMQ_IO_THREAD_POOL_SIZE!" ^\r
-+P 1048576 ^\r
-!RABBITMQ_LISTEN_ARG! ^\r
 !RABBITMQ_SERVER_ERL_ARGS! ^\r
+!RABBITMQ_LISTEN_ARG! ^\r
 -kernel inet_default_connect_options "[{nodelay, true}]" ^\r
 !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^\r
 -sasl errlog_type error ^\r
@@ -129,5 +139,16 @@ if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" (
 !RABBITMQ_DIST_ARG! ^\r
 !STAR!\r
 \r
+EXIT /B 0\r
+\r
+:check_not_empty\r
+if "%~2"=="" (\r
+    ECHO "Error: ENV variable should be defined: %1. Please check rabbitmq-env and rabbitmq-defaults, and !RABBITMQ_CONF_ENV_FILE! script files. Check also your Environment Variables settings"\r
+    set ENV_OK=false\r
+    EXIT /B 78 \r
+    )\r
+EXIT /B 0\r
+\r
 endlocal\r
 endlocal\r
+\r
index 59425540e672d0b2073bd76d7e0ab50c42bf9505..f8a8d5a4648169a8e683b2be3b5e2d88fabfe035 100644 (file)
@@ -104,6 +104,16 @@ if not exist "!RABBITMQ_BASE!" (
     echo Creating base directory !RABBITMQ_BASE! & md "!RABBITMQ_BASE!"\r
 )\r
 \r
+set ENV_OK=true\r
+CALL :check_not_empty "RABBITMQ_BOOT_MODULE" !RABBITMQ_BOOT_MODULE! \r
+CALL :check_not_empty "RABBITMQ_NAME_TYPE" !RABBITMQ_NAME_TYPE!\r
+CALL :check_not_empty "RABBITMQ_NODENAME" !RABBITMQ_NODENAME!\r
+\r
+\r
+if "!ENV_OK!"=="false" (\r
+    EXIT /b 78\r
+)\r
+\r
 "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" list !RABBITMQ_SERVICENAME! 2>NUL 1>NUL\r
 if errorlevel 1 (\r
     "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" add !RABBITMQ_SERVICENAME! -internalservicename !RABBITMQ_SERVICENAME!\r
@@ -131,6 +141,12 @@ if ERRORLEVEL 3 (
     set RABBITMQ_DIST_ARG=-kernel inet_dist_listen_min !RABBITMQ_DIST_PORT! -kernel inet_dist_listen_max !RABBITMQ_DIST_PORT!\r
 )\r
 \r
+    REM Try to create config file, if it doesn't exist\r
+    REM It still can fail to be created, but at least not for default install\r
+if not exist "!RABBITMQ_CONFIG_FILE!.config" (\r
+    echo []. > !RABBITMQ_CONFIG_FILE!.config\r
+)\r
+\r
 if exist "!RABBITMQ_CONFIG_FILE!.config" (\r
     set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!"\r
 ) else (\r
@@ -150,7 +166,11 @@ if "!RABBITMQ_NODE_ONLY!"=="" (
 )\r
 \r
 if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" (\r
-    set RABBITMQ_IO_THREAD_POOL_SIZE=30\r
+    set RABBITMQ_IO_THREAD_POOL_SIZE=64\r
+)\r
+\r
+if "!RABBITMQ_SERVICE_RESTART!"=="" (\r
+    set RABBITMQ_SERVICE_RESTART=restart\r
 )\r
 \r
 set ERLANG_SERVICE_ARGUMENTS= ^\r
@@ -160,9 +180,8 @@ set ERLANG_SERVICE_ARGUMENTS= ^
 !RABBITMQ_CONFIG_ARG! ^\r
 +W w ^\r
 +A "!RABBITMQ_IO_THREAD_POOL_SIZE!" ^\r
-+P 1048576 ^\r
-!RABBITMQ_LISTEN_ARG! ^\r
 !RABBITMQ_SERVER_ERL_ARGS! ^\r
+!RABBITMQ_LISTEN_ARG! ^\r
 -kernel inet_default_connect_options "[{nodelay,true}]" ^\r
 !RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS! ^\r
 -sasl errlog_type error ^\r
@@ -184,7 +203,10 @@ set ERLANG_SERVICE_ARGUMENTS= ^
 set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\!\r
 set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"!\r
 \r
+\r
+\r
 "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" set !RABBITMQ_SERVICENAME! ^\r
+-onfail !RABBITMQ_SERVICE_RESTART! ^\r
 -machine "!ERLANG_SERVICE_MANAGER_PATH!\erl.exe" ^\r
 -env ERL_CRASH_DUMP="!RABBITMQ_BASE:\=/!/erl_crash.dump" ^\r
 -env ERL_LIBS="!ERL_LIBS!" ^\r
@@ -206,5 +228,15 @@ goto END
 \r
 :END\r
 \r
+EXIT /B 0\r
+\r
+:check_not_empty\r
+if "%~2"=="" (\r
+    ECHO "Error: ENV variable should be defined: %1. Please check rabbitmq-env, rabbitmq-default, and !RABBITMQ_CONF_ENV_FILE! script files. Check also your Environment Variables settings"\r
+    set ENV_OK=false\r
+    EXIT /B 78 \r
+    )\r
+EXIT /B 0\r
+\r
 endlocal\r
 endlocal\r
index 3705b9a97924f471b60ee2b7b24db94996bc1197..2336c3d466130828e441444951b4427708c6d8c6 100755 (executable)
@@ -30,7 +30,7 @@ fi
 RABBITMQ_USE_LONGNAME=${RABBITMQ_USE_LONGNAME} \
 exec ${ERL_DIR}erl \
     -pa "${RABBITMQ_HOME}/ebin" \
-    -noinput \
+    -noinput +B \
     -hidden \
     ${RABBITMQ_CTL_ERL_ARGS} \
     -boot "${CLEAN_BOOT_FILE}" \
diff --git a/rabbitmq-server/scripts/travis_test_ocf_ra.sh b/rabbitmq-server/scripts/travis_test_ocf_ra.sh
new file mode 100644 (file)
index 0000000..e8f9a74
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/sh -eux
+# Prepare and run a smoke test against the RabbitMQ OCF RA only if
+# the scripts/rabbitmq-server-ha.ocf has changes
+if ! git diff HEAD~ --name-only | grep -q scripts/rabbitmq-server-ha.ocf
+then
+  exit 0
+fi
+
+export VAGRANT_VERSION=1.8.1
+export DOCKER_IMAGE=bogdando/rabbitmq-cluster-ocf-wily
+export UPLOAD_METHOD=none
+export DOCKER_MOUNTS="$(pwd)/scripts/rabbitmq-server-ha.ocf:/tmp/rabbitmq-server-ha"
+
+# Install vagrant and requirements
+sudo apt-get install -qq git wget
+wget --no-verbose https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}_x86_64.deb
+sudo dpkg -i --force-all ./vagrant_${VAGRANT_VERSION}_x86_64.deb
+vagrant plugin install vagrant-triggers
+
+# Update docker and prepare images
+sudo apt-get update
+sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install --only-upgrade docker-engine
+sudo service docker restart
+docker pull $DOCKER_IMAGE
+
+# Prepare and run a smoke test for a rabbitmq cluster by the OCF RA
+git clone https://github.com/bogdando/rabbitmq-cluster-ocf-vagrant.git
+cd ./rabbitmq-cluster-ocf-vagrant
+vagrant up --provider docker
+docker exec -it n1 /bin/bash /vagrant/vagrant_script/test_rabbitcluster.sh rabbit@n1 rabbit@n2
index 8388207d52a1fd1dc081987933751ed1de623085..2986f356f5e6f9cb09fba52eaa24f2388137c454 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
--spec(run/0 :: () -> 'ok').
--spec(gc/0 :: () -> 'ok').
-
--endif.
+-spec start_link() -> {'ok', pid()} | {'error', any()}.
+-spec run() -> 'ok'.
+-spec gc() -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index fc3bc2890f0103792b3dbeab6355ed48d1ab6fc7..778137c1c72da055f3cb2a90c1d00c5a4414f7b4 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([monitor_ref/0]).
 
--type(monitor_ref() :: reference() | {atom(), pid()}).
--type(fun_or_mfa(A) :: fun ((pid()) -> A) | {atom(), atom(), [any()]}).
+-type monitor_ref() :: reference() | {atom(), pid()}.
+-type fun_or_mfa(A) :: fun ((pid()) -> A) | {atom(), atom(), [any()]}.
 
--spec(start_link/1 ::
-        (non_neg_integer()) -> {'ok', pid()} | ignore | {'error', any()}).
--spec(invoke/2 :: ( pid(),  fun_or_mfa(A)) -> A;
-                  ([pid()], fun_or_mfa(A)) -> {[{pid(), A}],
-                                               [{pid(), term()}]}).
--spec(invoke_no_result/2 :: (pid() | [pid()], fun_or_mfa(any())) -> 'ok').
--spec(monitor/2 :: ('process', pid()) -> monitor_ref()).
--spec(demonitor/1 :: (monitor_ref()) -> 'true').
+-spec start_link
+        (non_neg_integer()) -> {'ok', pid()} | ignore | {'error', any()}.
+-spec invoke
+        ( pid(),  fun_or_mfa(A)) -> A;
+        ([pid()], fun_or_mfa(A)) -> {[{pid(), A}], [{pid(), term()}]}.
+-spec invoke_no_result(pid() | [pid()], fun_or_mfa(any())) -> 'ok'.
+-spec monitor('process', pid()) -> monitor_ref().
+-spec demonitor(monitor_ref()) -> 'true'.
 
--spec(call/2 ::
+-spec call
         ( pid(),  any()) -> any();
-        ([pid()], any()) -> {[{pid(), any()}], [{pid(), term()}]}).
--spec(cast/2 :: (pid() | [pid()], any()) -> 'ok').
-
--endif.
+        ([pid()], any()) -> {[{pid(), any()}], [{pid(), term()}]}.
+-spec cast(pid() | [pid()], any()) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index 84ca9553ec1492e2620cba368d1bce8037984827..ba0964f9dd6ce9980fdf3217539e376099db76b0 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/1 :: (integer()) -> rabbit_types:ok_pid_or_error()).
--spec(count/1 :: ([node()]) -> integer()).
-
--endif.
+-spec start_link(integer()) -> rabbit_types:ok_pid_or_error().
+-spec count([node()]) -> integer().
 
 %%----------------------------------------------------------------------------
 
index 99133e75b6ffd11472bb8fb636a2f5e647e863d6..a2232c06874fbbbe5da2485f42830327f147ada0 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([?MODULE/0]).
 
--opaque(?MODULE()  :: {gb_trees:tree(), gb_trees:tree()}).
-
--type(pk()         :: any()).
--type(sk()         :: any()).
--type(val()        :: any()).
--type(kv()         :: {pk(), val()}).
-
--spec(empty/0      :: () -> ?MODULE()).
--spec(insert/4     :: (pk(), [sk()], val(), ?MODULE()) -> ?MODULE()).
--spec(take/3       :: ([pk()], sk(), ?MODULE()) -> {[kv()], ?MODULE()}).
--spec(take/2       :: (sk(), ?MODULE()) -> {[kv()], ?MODULE()}).
--spec(take_all/2   :: (sk(), ?MODULE()) -> {[kv()], ?MODULE()}).
--spec(drop/2       :: (pk(), ?MODULE()) -> ?MODULE()).
--spec(is_defined/2 :: (sk(), ?MODULE()) -> boolean()).
--spec(is_empty/1   :: (?MODULE()) -> boolean()).
--spec(smallest/1   :: (?MODULE()) -> kv()).
--spec(size/1       :: (?MODULE()) -> non_neg_integer()).
-
--endif.
+-opaque ?MODULE()  :: {gb_trees:tree(), gb_trees:tree()}.
+
+-type pk()         :: any().
+-type sk()         :: any().
+-type val()        :: any().
+-type kv()         :: {pk(), val()}.
+
+-spec empty() -> ?MODULE().
+-spec insert(pk(), [sk()], val(), ?MODULE()) -> ?MODULE().
+-spec take([pk()], sk(), ?MODULE()) -> {[kv()], ?MODULE()}.
+-spec take(sk(), ?MODULE()) -> {[kv()], ?MODULE()}.
+-spec take_all(sk(), ?MODULE()) -> {[kv()], ?MODULE()}.
+-spec drop(pk(), ?MODULE()) -> ?MODULE().
+-spec is_defined(sk(), ?MODULE()) -> boolean().
+-spec is_empty(?MODULE()) -> boolean().
+-spec smallest(?MODULE()) -> kv().
+-spec size(?MODULE()) -> non_neg_integer().
 
 %%----------------------------------------------------------------------------
 
index d5f0cbee6f5bde8a4291968009440c3703c8cc67..e4af1e8c1a5e39df437f12cf9afac39e0b2d15c6 100644 (file)
 -export([register_callback/3]).
 -export([open/3, close/1, read/2, append/2, needs_sync/1, sync/1, position/2,
          truncate/1, current_virtual_offset/1, current_raw_offset/1, flush/1,
-         copy/3, set_maximum_since_use/1, delete/1, clear/1]).
+         copy/3, set_maximum_since_use/1, delete/1, clear/1,
+         open_with_absolute_path/3]).
 -export([obtain/0, obtain/1, release/0, release/1, transfer/1, transfer/2,
          set_limit/1, get_limit/0, info_keys/0, with_handle/1, with_handle/2,
          info/0, info/1, clear_read_cache/0]).
 %% Specs
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--type(ref() :: any()).
--type(ok_or_error() :: 'ok' | {'error', any()}).
--type(val_or_error(T) :: {'ok', T} | {'error', any()}).
--type(position() :: ('bof' | 'eof' | non_neg_integer() |
+-type ref() :: any().
+-type ok_or_error() :: 'ok' | {'error', any()}.
+-type val_or_error(T) :: {'ok', T} | {'error', any()}.
+-type position() :: ('bof' | 'eof' | non_neg_integer() |
                      {('bof' |'eof'), non_neg_integer()} |
-                     {'cur', integer()})).
--type(offset() :: non_neg_integer()).
+                     {'cur', integer()}).
+-type offset() :: non_neg_integer().
 
--spec(register_callback/3 :: (atom(), atom(), [any()]) -> 'ok').
--spec(open/3 ::
+-spec register_callback(atom(), atom(), [any()]) -> 'ok'.
+-spec open
+        (file:filename(), [any()],
+         [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} |
+          {'read_buffer', (non_neg_integer() | 'unbuffered')}]) ->
+            val_or_error(ref()).
+-spec open_with_absolute_path
         (file:filename(), [any()],
          [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} |
-          {'read_buffer', (non_neg_integer() | 'unbuffered')}])
-        -> val_or_error(ref())).
--spec(close/1 :: (ref()) -> ok_or_error()).
--spec(read/2 :: (ref(), non_neg_integer()) ->
-                     val_or_error([char()] | binary()) | 'eof').
--spec(append/2 :: (ref(), iodata()) -> ok_or_error()).
--spec(sync/1 :: (ref()) ->  ok_or_error()).
--spec(position/2 :: (ref(), position()) -> val_or_error(offset())).
--spec(truncate/1 :: (ref()) -> ok_or_error()).
--spec(current_virtual_offset/1 :: (ref()) -> val_or_error(offset())).
--spec(current_raw_offset/1     :: (ref()) -> val_or_error(offset())).
--spec(flush/1 :: (ref()) -> ok_or_error()).
--spec(copy/3 :: (ref(), ref(), non_neg_integer()) ->
-                     val_or_error(non_neg_integer())).
--spec(delete/1 :: (ref()) -> ok_or_error()).
--spec(clear/1 :: (ref()) -> ok_or_error()).
--spec(set_maximum_since_use/1 :: (non_neg_integer()) -> 'ok').
--spec(obtain/0 :: () -> 'ok').
--spec(obtain/1 :: (non_neg_integer()) -> 'ok').
--spec(release/0 :: () -> 'ok').
--spec(release/1 :: (non_neg_integer()) -> 'ok').
--spec(transfer/1 :: (pid()) -> 'ok').
--spec(transfer/2 :: (pid(), non_neg_integer()) -> 'ok').
--spec(with_handle/1 :: (fun(() -> A)) -> A).
--spec(with_handle/2 :: (non_neg_integer(), fun(() -> A)) -> A).
--spec(set_limit/1 :: (non_neg_integer()) -> 'ok').
--spec(get_limit/0 :: () -> non_neg_integer()).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/0 :: () -> rabbit_types:infos()).
--spec(info/1 :: ([atom()]) -> rabbit_types:infos()).
--spec(ulimit/0 :: () -> 'unknown' | non_neg_integer()).
-
--endif.
+          {'read_buffer', (non_neg_integer() | 'unbuffered')}]) ->
+            val_or_error(ref()).
+-spec close(ref()) -> ok_or_error().
+-spec read
+        (ref(), non_neg_integer()) -> val_or_error([char()] | binary()) | 'eof'.
+-spec append(ref(), iodata()) -> ok_or_error().
+-spec sync(ref()) ->  ok_or_error().
+-spec position(ref(), position()) -> val_or_error(offset()).
+-spec truncate(ref()) -> ok_or_error().
+-spec current_virtual_offset(ref()) -> val_or_error(offset()).
+-spec current_raw_offset(ref()) -> val_or_error(offset()).
+-spec flush(ref()) -> ok_or_error().
+-spec copy(ref(), ref(), non_neg_integer()) -> val_or_error(non_neg_integer()).
+-spec delete(ref()) -> ok_or_error().
+-spec clear(ref()) -> ok_or_error().
+-spec set_maximum_since_use(non_neg_integer()) -> 'ok'.
+-spec obtain() -> 'ok'.
+-spec obtain(non_neg_integer()) -> 'ok'.
+-spec release() -> 'ok'.
+-spec release(non_neg_integer()) -> 'ok'.
+-spec transfer(pid()) -> 'ok'.
+-spec transfer(pid(), non_neg_integer()) -> 'ok'.
+-spec with_handle(fun(() -> A)) -> A.
+-spec with_handle(non_neg_integer(), fun(() -> A)) -> A.
+-spec set_limit(non_neg_integer()) -> 'ok'.
+-spec get_limit() -> non_neg_integer().
+-spec info_keys() -> rabbit_types:info_keys().
+-spec info() -> rabbit_types:infos().
+-spec info([atom()]) -> rabbit_types:infos().
+-spec ulimit() -> 'unknown' | non_neg_integer().
 
 %%----------------------------------------------------------------------------
 -define(INFO_KEYS, [total_limit, total_used, sockets_limit, sockets_used]).
@@ -300,9 +301,11 @@ register_callback(M, F, A)
     gen_server2:cast(?SERVER, {register_callback, self(), {M, F, A}}).
 
 open(Path, Mode, Options) ->
-    Path1 = filename:absname(Path),
+    open_with_absolute_path(filename:absname(Path), Mode, Options).
+
+open_with_absolute_path(Path, Mode, Options) ->
     File1 = #file { reader_count = RCount, has_writer = HasWriter } =
-        case get({Path1, fhc_file}) of
+        case get({Path, fhc_file}) of
             File = #file {} -> File;
             undefined       -> #file { reader_count = 0,
                                        has_writer = false }
@@ -311,15 +314,15 @@ open(Path, Mode, Options) ->
     IsWriter = is_writer(Mode1),
     case IsWriter andalso HasWriter of
         true  -> {error, writer_exists};
-        false -> {ok, Ref} = new_closed_handle(Path1, Mode1, Options),
-                 case get_or_reopen([{Ref, new}]) of
+        false -> {ok, Ref} = new_closed_handle(Path, Mode1, Options),
+                 case get_or_reopen_timed([{Ref, new}]) of
                      {ok, [_Handle1]} ->
                          RCount1 = case is_reader(Mode1) of
                                        true  -> RCount + 1;
                                        false -> RCount
                                    end,
                          HasWriter1 = HasWriter orelse IsWriter,
-                         put({Path1, fhc_file},
+                         put({Path, fhc_file},
                              File1 #file { reader_count = RCount1,
                                            has_writer = HasWriter1 }),
                          {ok, Ref};
@@ -375,7 +378,7 @@ read(Ref, Count) ->
                                offset           = Offset}
                   = tune_read_buffer_limit(Handle0, Count),
               WantedCount = Count - BufRem,
-              case prim_file_read(Hdl, lists:max([BufSz, WantedCount])) of
+              case prim_file_read(Hdl, max(BufSz, WantedCount)) of
                   {ok, Data} ->
                       <<_:BufPos/binary, BufTl/binary>> = Buf,
                       ReadCount = size(Data),
@@ -671,7 +674,7 @@ with_handles(Refs, Fun) ->
     with_handles(Refs, reset, Fun).
 
 with_handles(Refs, ReadBuffer, Fun) ->
-    case get_or_reopen([{Ref, reopen} || Ref <- Refs]) of
+    case get_or_reopen_timed([{Ref, reopen} || Ref <- Refs]) of
         {ok, Handles0} ->
             Handles = case ReadBuffer of
                           reset -> [reset_read_buffer(H) || H <- Handles0];
@@ -709,6 +712,10 @@ with_flushed_handles(Refs, ReadBuffer, Fun) ->
               end
       end).
 
+get_or_reopen_timed(RefNewOrReopens) ->
+    file_handle_cache_stats:update(
+      io_file_handle_open_attempt, fun() -> get_or_reopen(RefNewOrReopens) end).
+
 get_or_reopen(RefNewOrReopens) ->
     case partition_handles(RefNewOrReopens) of
         {OpenHdls, []} ->
@@ -1297,11 +1304,6 @@ pending_out({N, Queue}) ->
 pending_count({Count, _Queue}) ->
     Count.
 
-pending_is_empty({0, _Queue}) ->
-    true;
-pending_is_empty({_N, _Queue}) ->
-    false.
-
 %%----------------------------------------------------------------------------
 %% server helpers
 %%----------------------------------------------------------------------------
@@ -1348,17 +1350,24 @@ process_open(State = #fhc_state { limit        = Limit,
     {Pending1, State1} = process_pending(Pending, Limit - used(State), State),
     State1 #fhc_state { open_pending = Pending1 }.
 
-process_obtain(Type, State = #fhc_state { limit        = Limit,
-                                          obtain_limit = ObtainLimit }) ->
-    ObtainCount = obtain_state(Type, count, State),
-    Pending = obtain_state(Type, pending, State),
-    Quota = case Type of
-                file   -> Limit - (used(State));
-                socket -> lists:min([ObtainLimit - ObtainCount,
-                                     Limit - (used(State))])
-            end,
+process_obtain(socket, State = #fhc_state { limit        = Limit,
+                                            obtain_limit = ObtainLimit,
+                                            open_count = OpenCount,
+                                            obtain_count_socket = ObtainCount,
+                                            obtain_pending_socket = Pending,
+                                            obtain_count_file = ObtainCountF}) ->
+    Quota = min(ObtainLimit - ObtainCount,
+                Limit - (OpenCount + ObtainCount + ObtainCountF)),
     {Pending1, State1} = process_pending(Pending, Quota, State),
-    set_obtain_state(Type, pending, Pending1, State1).
+    State1#fhc_state{obtain_pending_socket = Pending1};
+process_obtain(file, State = #fhc_state { limit        = Limit,
+                                          open_count = OpenCount,
+                                          obtain_count_socket = ObtainCountS,
+                                          obtain_count_file = ObtainCountF,
+                                          obtain_pending_file = Pending}) ->
+    Quota = Limit - (OpenCount + ObtainCountS + ObtainCountF),
+    {Pending1, State1} = process_pending(Pending, Quota, State),
+    State1#fhc_state{obtain_pending_file = Pending1}.
 
 process_pending(Pending, Quota, State) when Quota =< 0 ->
     {Pending, State};
@@ -1383,26 +1392,21 @@ run_pending_item(#pending { kind      = Kind,
     true = ets:update_element(Clients, Pid, {#cstate.blocked, false}),
     update_counts(Kind, Pid, Requested, State).
 
-update_counts(Kind, Pid, Delta,
+update_counts(open, Pid, Delta,
               State = #fhc_state { open_count          = OpenCount,
-                                   obtain_count_file   = ObtainCountF,
-                                   obtain_count_socket = ObtainCountS,
                                    clients             = Clients }) ->
-    {OpenDelta, ObtainDeltaF, ObtainDeltaS} =
-        update_counts1(Kind, Pid, Delta, Clients),
-    State #fhc_state { open_count          = OpenCount    + OpenDelta,
-                       obtain_count_file   = ObtainCountF + ObtainDeltaF,
-                       obtain_count_socket = ObtainCountS + ObtainDeltaS }.
-
-update_counts1(open, Pid, Delta, Clients) ->
     ets:update_counter(Clients, Pid, {#cstate.opened, Delta}),
-    {Delta, 0, 0};
-update_counts1({obtain, file}, Pid, Delta, Clients) ->
+    State #fhc_state { open_count = OpenCount + Delta};
+update_counts({obtain, file}, Pid, Delta,
+              State = #fhc_state {obtain_count_file   = ObtainCountF,
+                                  clients             = Clients }) ->
     ets:update_counter(Clients, Pid, {#cstate.obtained_file, Delta}),
-    {0, Delta, 0};
-update_counts1({obtain, socket}, Pid, Delta, Clients) ->
+    State #fhc_state { obtain_count_file = ObtainCountF + Delta};
+update_counts({obtain, socket}, Pid, Delta,
+              State = #fhc_state {obtain_count_socket   = ObtainCountS,
+                                  clients             = Clients }) ->
     ets:update_counter(Clients, Pid, {#cstate.obtained_socket, Delta}),
-    {0, 0, Delta}.
+    State #fhc_state { obtain_count_socket = ObtainCountS + Delta}.
 
 maybe_reduce(State) ->
     case needs_reduce(State) of
@@ -1410,18 +1414,20 @@ maybe_reduce(State) ->
         false -> State
     end.
 
-needs_reduce(State = #fhc_state { limit                 = Limit,
-                                  open_pending          = OpenPending,
-                                  obtain_limit          = ObtainLimit,
-                                  obtain_count_socket   = ObtainCountS,
-                                  obtain_pending_file   = ObtainPendingF,
-                                  obtain_pending_socket = ObtainPendingS }) ->
+needs_reduce(#fhc_state { limit                 = Limit,
+                          open_count            = OpenCount,
+                          open_pending          = {OpenPending, _},
+                          obtain_limit          = ObtainLimit,
+                          obtain_count_socket   = ObtainCountS,
+                          obtain_count_file     = ObtainCountF,
+                          obtain_pending_file   = {ObtainPendingF, _},
+                          obtain_pending_socket = {ObtainPendingS, _} }) ->
     Limit =/= infinity
-        andalso ((used(State) > Limit)
-                 orelse (not pending_is_empty(OpenPending))
-                 orelse (not pending_is_empty(ObtainPendingF))
+        andalso (((OpenCount + ObtainCountS + ObtainCountF) > Limit)
+                 orelse (OpenPending =/= 0)
+                 orelse (ObtainPendingF =/= 0)
                  orelse (ObtainCountS < ObtainLimit
-                         andalso not pending_is_empty(ObtainPendingS))).
+                         andalso (ObtainPendingS =/= 0))).
 
 reduce(State = #fhc_state { open_pending          = OpenPending,
                             obtain_pending_file   = ObtainPendingFile,
@@ -1475,7 +1481,7 @@ notify_age(CStates, AverageAge) ->
 notify_age0(Clients, CStates, Required) ->
     case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of
         []            -> ok;
-        Notifications -> S = random:uniform(length(Notifications)),
+        Notifications -> S = rand_compat:uniform(length(Notifications)),
                          {L1, L2} = lists:split(S, Notifications),
                          notify(Clients, Required, L2 ++ L1)
     end.
index ccf1e49662fe89970c723ac6d8cc356b75642cca..12a78f805e2295593646633a290ca7c0bbd75af4 100644 (file)
@@ -26,7 +26,7 @@
         [io_reopen, mnesia_ram_tx, mnesia_disk_tx,
          msg_store_read, msg_store_write,
          queue_index_journal_write, queue_index_write, queue_index_read]).
--define(COUNT_TIME, [io_sync, io_seek]).
+-define(COUNT_TIME, [io_sync, io_seek, io_file_handle_open_attempt]).
 -define(COUNT_TIME_BYTES, [io_read, io_write]).
 
 init() ->
index d3e1a4e4385b60955dd5065d3f136c6c17dc17b7..18302699a297f8fdb32adc94790fb674ee996eb2 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(stop/1 :: (pid()) -> 'ok').
--spec(fork/1 :: (pid()) -> 'ok').
--spec(finish/1 :: (pid()) -> 'ok').
--spec(in/2 :: (pid(), any()) -> 'ok').
--spec(sync_in/2 :: (pid(), any()) -> 'ok').
--spec(out/1 :: (pid()) -> {'value', any()} | 'empty').
-
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec stop(pid()) -> 'ok'.
+-spec fork(pid()) -> 'ok'.
+-spec finish(pid()) -> 'ok'.
+-spec in(pid(), any()) -> 'ok'.
+-spec sync_in(pid(), any()) -> 'ok'.
+-spec out(pid()) -> {'value', any()} | 'empty'.
 
 %%----------------------------------------------------------------------------
 
index aeb050e15fea4b2eb30a17b22b02441a94fcf280..176e14537f2d08b1b12ed4748d09605311bead1b 100644 (file)
 %% For INSTR_MOD callbacks
 -export([call/3, cast/2, monitor/1, demonitor/1]).
 
--ifndef(use_specs).
--export([behaviour_info/1]).
--endif.
-
 -export([table_definitions/0]).
 
 -define(GROUP_TABLE, gm_group).
 
 -define(TAG, '$gm').
 
--ifdef(use_specs).
-
 -export_type([group_name/0]).
 
--type(group_name() :: any()).
--type(txn_fun() :: fun((fun(() -> any())) -> any())).
+-type group_name() :: any().
+-type txn_fun() :: fun((fun(() -> any())) -> any()).
 
--spec(create_tables/0 :: () -> 'ok' | {'aborted', any()}).
--spec(start_link/4 :: (group_name(), atom(), any(), txn_fun()) ->
-                           rabbit_types:ok_pid_or_error()).
--spec(leave/1 :: (pid()) -> 'ok').
--spec(broadcast/2 :: (pid(), any()) -> 'ok').
--spec(confirmed_broadcast/2 :: (pid(), any()) -> 'ok').
--spec(info/1 :: (pid()) -> rabbit_types:infos()).
--spec(validate_members/2 :: (pid(), [pid()]) -> 'ok').
--spec(forget_group/1 :: (group_name()) -> 'ok').
+-spec create_tables() -> 'ok' | {'aborted', any()}.
+-spec start_link(group_name(), atom(), any(), txn_fun()) ->
+          rabbit_types:ok_pid_or_error().
+-spec leave(pid()) -> 'ok'.
+-spec broadcast(pid(), any()) -> 'ok'.
+-spec confirmed_broadcast(pid(), any()) -> 'ok'.
+-spec info(pid()) -> rabbit_types:infos().
+-spec validate_members(pid(), [pid()]) -> 'ok'.
+-spec forget_group(group_name()) -> 'ok'.
 
 %% The joined, members_changed and handle_msg callbacks can all return
 %% any of the following terms:
 -callback handle_terminate(Args :: term(), Reason :: term()) ->
     ok | term().
 
--else.
-
-behaviour_info(callbacks) ->
-    [{joined, 2}, {members_changed, 3}, {handle_msg, 3}, {handle_terminate, 2}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
-
 create_tables() ->
     create_tables([?TABLE]).
 
@@ -551,9 +536,6 @@ forget_group(GroupName) ->
 
 init([GroupName, Module, Args, TxnFun]) ->
     put(process_name, {?MODULE, GroupName}),
-    _ = random:seed(erlang:phash2([node()]),
-                    time_compat:monotonic_time(),
-                    time_compat:unique_integer()),
     Self = make_member(GroupName),
     gen_server2:cast(self(), join),
     {ok, #state { self                = Self,
@@ -617,14 +599,20 @@ handle_call({add_on_right, NewMember}, _From,
                              group_name    = GroupName,
                              members_state = MembersState,
                              txn_executor  = TxnFun }) ->
-    Group = record_new_member_in_group(NewMember, Self, GroupName, TxnFun),
-    View1 = group_to_view(Group),
-    MembersState1 = remove_erased_members(MembersState, View1),
-    ok = send_right(NewMember, View1,
-                    {catchup, Self, prepare_members_state(MembersState1)}),
-    {Result, State1} = change_view(View1, State #state {
-                                            members_state = MembersState1 }),
-    handle_callback_result({Result, {ok, Group}, State1}).
+    try
+        Group = record_new_member_in_group(
+                  NewMember, Self, GroupName, TxnFun),
+        View1 = group_to_view(check_membership(Self, Group)),
+        MembersState1 = remove_erased_members(MembersState, View1),
+        ok = send_right(NewMember, View1,
+                        {catchup, Self, prepare_members_state(MembersState1)}),
+        {Result, State1} = change_view(View1, State #state {
+                                                members_state = MembersState1 }),
+        handle_callback_result({Result, {ok, Group}, State1})
+    catch
+        lost_membership ->
+            {stop, normal, State}
+    end.
 
 %% add_on_right causes a catchup to be sent immediately from the left,
 %% so we can never see this from the left neighbour. However, it's
@@ -638,19 +626,28 @@ handle_cast({?TAG, _ReqVer, check_neighbours},
 
 handle_cast({?TAG, ReqVer, Msg},
             State = #state { view          = View,
+                             self          = Self,
                              members_state = MembersState,
                              group_name    = GroupName }) ->
-    {Result, State1} =
-        case needs_view_update(ReqVer, View) of
-            true  -> View1 = group_to_view(dirty_read_group(GroupName)),
-                     MemberState1 = remove_erased_members(MembersState, View1),
-                     change_view(View1, State #state {
-                                          members_state = MemberState1 });
-            false -> {ok, State}
-        end,
-    handle_callback_result(
-      if_callback_success(
-        Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1));
+    try
+        {Result, State1} =
+            case needs_view_update(ReqVer, View) of
+                true  ->
+                    View1 = group_to_view(
+                              check_membership(Self,
+                                               dirty_read_group(GroupName))),
+                    MemberState1 = remove_erased_members(MembersState, View1),
+                    change_view(View1, State #state {
+                                         members_state = MemberState1 });
+                false -> {ok, State}
+            end,
+        handle_callback_result(
+          if_callback_success(
+            Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1))
+    catch
+        lost_membership ->
+            {stop, normal, State}
+    end;
 
 handle_cast({broadcast, _Msg, _SizeHint},
             State = #state { shutting_down = {true, _} }) ->
@@ -724,39 +721,44 @@ handle_info({'DOWN', MRef, process, _Pid, Reason},
                              group_name    = GroupName,
                              confirms      = Confirms,
                              txn_executor  = TxnFun }) ->
-    Member = case {Left, Right} of
-                 {{Member1, MRef}, _} -> Member1;
-                 {_, {Member1, MRef}} -> Member1;
-                 _                    -> undefined
-             end,
-    case {Member, Reason} of
-        {undefined, _} ->
-            noreply(State);
-        {_, {shutdown, ring_shutdown}} ->
-            noreply(State);
-        _ ->
-            %% In the event of a partial partition we could see another member
-            %% go down and then remove them from Mnesia. While they can
-            %% recover from this they'd have to restart the queue - not
-            %% ideal. So let's sleep here briefly just in case this was caused
-            %% by a partial partition; in which case by the time we record the
-            %% member death in Mnesia we will probably be in a full
-            %% partition and will not be assassinating another member.
-            timer:sleep(100),
-            View1 = group_to_view(record_dead_member_in_group(
-                                    Member, GroupName, TxnFun)),
-            handle_callback_result(
-              case alive_view_members(View1) of
-                  [Self] -> maybe_erase_aliases(
-                              State #state {
-                                members_state = blank_member_state(),
-                                confirms      = purge_confirms(Confirms) },
-                              View1);
-                  _      -> change_view(View1, State)
-              end)
+    try
+        check_membership(GroupName),
+        Member = case {Left, Right} of
+                     {{Member1, MRef}, _} -> Member1;
+                     {_, {Member1, MRef}} -> Member1;
+                     _                    -> undefined
+                 end,
+        case {Member, Reason} of
+            {undefined, _} ->
+                noreply(State);
+            {_, {shutdown, ring_shutdown}} ->
+                noreply(State);
+            _ ->
+                %% In the event of a partial partition we could see another member
+                %% go down and then remove them from Mnesia. While they can
+                %% recover from this they'd have to restart the queue - not
+                %% ideal. So let's sleep here briefly just in case this was caused
+                %% by a partial partition; in which case by the time we record the
+                %% member death in Mnesia we will probably be in a full
+                %% partition and will not be assassinating another member.
+                timer:sleep(100),
+                View1 = group_to_view(record_dead_member_in_group(Self,
+                                        Member, GroupName, TxnFun, true)),
+                handle_callback_result(
+                  case alive_view_members(View1) of
+                      [Self] -> maybe_erase_aliases(
+                                  State #state {
+                                    members_state = blank_member_state(),
+                                    confirms      = purge_confirms(Confirms) },
+                                  View1);
+                      _      -> change_view(View1, State)
+                  end)
+        end
+    catch
+        lost_membership ->
+            {stop, normal, State}
     end.
 
-
 terminate(Reason, #state { module = Module, callback_args = Args }) ->
     Module:handle_terminate(Args, Reason).
 
@@ -841,52 +843,30 @@ handle_msg({catchup, _NotLeft, _MembersState}, State) ->
 
 handle_msg({activity, Left, Activity},
            State = #state { self          = Self,
+                            group_name    = GroupName,
                             left          = {Left, _MRefL},
                             view          = View,
                             members_state = MembersState,
                             confirms      = Confirms })
   when MembersState =/= undefined ->
-    {MembersState1, {Confirms1, Activity1}} =
-        lists:foldl(
-          fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) ->
-                  with_member_acc(
-                    fun (Member = #member { pending_ack = PA,
-                                            last_pub    = LP,
-                                            last_ack    = LA },
-                         {Confirms2, Activity2}) ->
-                            case is_member_alias(Id, Self, View) of
-                                true ->
-                                    {ToAck, PA1} =
-                                        find_common(queue_from_pubs(Pubs), PA,
-                                                    queue:new()),
-                                    LA1 = last_ack(Acks, LA),
-                                    AckNums = acks_from_queue(ToAck),
-                                    Confirms3 = maybe_confirm(
-                                                  Self, Id, Confirms2, AckNums),
-                                    {Member #member { pending_ack = PA1,
-                                                      last_ack    = LA1 },
-                                     {Confirms3,
-                                      activity_cons(
-                                        Id, [], AckNums, Activity2)}};
-                                false ->
-                                    PA1 = apply_acks(Acks, join_pubs(PA, Pubs)),
-                                    LA1 = last_ack(Acks, LA),
-                                    LP1 = last_pub(Pubs, LP),
-                                    {Member #member { pending_ack = PA1,
-                                                      last_pub    = LP1,
-                                                      last_ack    = LA1 },
-                                     {Confirms2,
-                                      activity_cons(Id, Pubs, Acks, Activity2)}}
-                            end
-                    end, Id, MembersStateConfirmsActivity)
-          end, {MembersState, {Confirms, activity_nil()}}, Activity),
-    State1 = State #state { members_state = MembersState1,
-                            confirms      = Confirms1 },
-    Activity3 = activity_finalise(Activity1),
-    ok = maybe_send_activity(Activity3, State1),
-    {Result, State2} = maybe_erase_aliases(State1, View),
-    if_callback_success(
-      Result, fun activity_true/3, fun activity_false/3, Activity3, State2);
+    try
+        %% If we have to stop, do it ASAP so that we avoid any ack confirmation.
+        %% Membership must be checked again by erase_members_in_group, as the
+        %% node can be marked as dead in the meantime
+        check_membership(GroupName),
+        {MembersState1, {Confirms1, Activity1}} =
+            calculate_activity(MembersState, Confirms, Activity, Self, View),
+        State1 = State #state { members_state = MembersState1,
+                                confirms      = Confirms1 },
+        Activity3 = activity_finalise(Activity1),
+        ok = maybe_send_activity(Activity3, State1),
+        {Result, State2} = maybe_erase_aliases(State1, View),
+        if_callback_success(
+          Result, fun activity_true/3, fun activity_false/3, Activity3, State2)
+    catch
+        lost_membership ->
+            {{stop, normal}, State}
+    end;
 
 handle_msg({activity, _NotLeft, _Activity}, State) ->
     {ok, State}.
@@ -1086,13 +1066,13 @@ join_group(Self, GroupName, #gm_group { members = Members } = Group, TxnFun) ->
                                prune_or_create_group(Self, GroupName, TxnFun),
                                TxnFun);
                 Alive ->
-                    Left = lists:nth(random:uniform(length(Alive)), Alive),
+                    Left = lists:nth(rand_compat:uniform(length(Alive)), Alive),
                     Handler =
                         fun () ->
                                 join_group(
                                   Self, GroupName,
-                                  record_dead_member_in_group(
-                                    Left, GroupName, TxnFun),
+                                  record_dead_member_in_group(Self,
+                                    Left, GroupName, TxnFun, false),
                                   TxnFun)
                         end,
                     try
@@ -1142,47 +1122,84 @@ prune_or_create_group(Self, GroupName, TxnFun) ->
               end
       end).
 
-record_dead_member_in_group(Member, GroupName, TxnFun) ->
-    TxnFun(
-      fun () ->
-              Group = #gm_group { members = Members, version = Ver } =
-                  read_group(GroupName),
-              case lists:splitwith(
-                     fun (Member1) -> Member1 =/= Member end, Members) of
-                  {_Members1, []} -> %% not found - already recorded dead
-                      Group;
-                  {Members1, [Member | Members2]} ->
-                      Members3 = Members1 ++ [{dead, Member} | Members2],
-                      write_group(Group #gm_group { members = Members3,
-                                                    version = Ver + 1 })
-              end
-      end).
+record_dead_member_in_group(Self, Member, GroupName, TxnFun, Verify) ->
+    Fun =
+        fun () ->
+                try
+                    Group = #gm_group { members = Members, version = Ver } =
+                        case Verify of
+                            true ->
+                                check_membership(Self, read_group(GroupName));
+                            false ->
+                                read_group(GroupName)
+                        end,
+                    case lists:splitwith(
+                           fun (Member1) -> Member1 =/= Member end, Members) of
+                        {_Members1, []} -> %% not found - already recorded dead
+                            Group;
+                        {Members1, [Member | Members2]} ->
+                            Members3 = Members1 ++ [{dead, Member} | Members2],
+                            write_group(Group #gm_group { members = Members3,
+                                                          version = Ver + 1 })
+                    end
+                catch
+                    lost_membership ->
+                        %% The transaction must not crash abruptly; instead,
+                        %% let the gen_server stop normally
+                        {error, lost_membership}
+                end
+        end,
+    handle_lost_membership_in_txn(TxnFun, Fun).
+
+handle_lost_membership_in_txn(TxnFun, Fun) ->
+    case TxnFun(Fun)  of
+        {error, lost_membership = T} ->
+            throw(T);
+        Any ->
+            Any
+    end.
 
 record_new_member_in_group(NewMember, Left, GroupName, TxnFun) ->
-    TxnFun(
-      fun () ->
-              Group = #gm_group { members = Members, version = Ver } =
-                  read_group(GroupName),
-              {Prefix, [Left | Suffix]} =
-                  lists:splitwith(fun (M) -> M =/= Left end, Members),
-              write_group(Group #gm_group {
-                            members = Prefix ++ [Left, NewMember | Suffix],
-                            version = Ver + 1 })
-      end).
+    Fun =
+        fun () ->
+                try
+                    Group = #gm_group { members = Members, version = Ver } =
+                        check_membership(Left, read_group(GroupName)),
+                    {Prefix, [Left | Suffix]} =
+                        lists:splitwith(fun (M) -> M =/= Left end, Members),
+                    write_group(Group #gm_group {
+                                  members = Prefix ++ [Left, NewMember | Suffix],
+                                  version = Ver + 1 })
+                catch
+                    lost_membership ->
+                        %% The transaction must not crash abruptly; instead,
+                        %% let the gen_server stop normally
+                        {error, lost_membership}
+                end
+        end,
+    handle_lost_membership_in_txn(TxnFun, Fun).
 
-erase_members_in_group(Members, GroupName, TxnFun) ->
+erase_members_in_group(Self, Members, GroupName, TxnFun) ->
     DeadMembers = [{dead, Id} || Id <- Members],
-    TxnFun(
-      fun () ->
-              Group = #gm_group { members = [_|_] = Members1, version = Ver } =
-                  read_group(GroupName),
-              case Members1 -- DeadMembers of
-                  Members1 -> Group;
-                  Members2 -> write_group(
-                                Group #gm_group { members = Members2,
-                                                  version = Ver + 1 })
+    Fun =
+        fun () ->
+                try
+                    Group = #gm_group { members = [_|_] = Members1, version = Ver } =
+                        check_membership(Self, read_group(GroupName)),
+                    case Members1 -- DeadMembers of
+                        Members1 -> Group;
+                        Members2 -> write_group(
+                                      Group #gm_group { members = Members2,
+                                                        version = Ver + 1 })
+                    end
+              catch
+                  lost_membership ->
+                      %% The transaction must not crash abruptly; instead,
+                      %% let the gen_server stop normally
+                      {error, lost_membership}
               end
-      end).
+        end,
+    handle_lost_membership_in_txn(TxnFun, Fun).
 
 maybe_erase_aliases(State = #state { self          = Self,
                                      group_name    = GroupName,
@@ -1203,7 +1220,7 @@ maybe_erase_aliases(State = #state { self          = Self,
     View1 = case Erasable of
                 [] -> View;
                 _  -> group_to_view(
-                        erase_members_in_group(Erasable, GroupName, TxnFun))
+                        erase_members_in_group(Self, Erasable, GroupName, TxnFun))
             end,
     change_view(View1, State #state { members_state = MembersState1 }).
 
@@ -1303,7 +1320,11 @@ find_common(A, B, Common) ->
         {{{value, Val}, A1}, {{value, Val}, B1}} ->
             find_common(A1, B1, queue:in(Val, Common));
         {{empty, _A}, _} ->
-            {Common, B}
+            {Common, B};
+        {_, {_, B1}} ->
+            find_common(A, B1, Common);
+        {{_, A1}, _} ->
+            find_common(A1, B, Common)
     end.
 
 
@@ -1378,6 +1399,41 @@ maybe_send_activity(Activity, #state { self  = Self,
 send_right(Right, View, Msg) ->
     ok = neighbour_cast(Right, {?TAG, view_version(View), Msg}).
 
+calculate_activity(MembersState, Confirms, Activity, Self, View) ->
+    lists:foldl(
+      fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) ->
+              with_member_acc(
+                fun (Member = #member { pending_ack = PA,
+                                        last_pub    = LP,
+                                        last_ack    = LA },
+                     {Confirms2, Activity2}) ->
+                        case is_member_alias(Id, Self, View) of
+                            true ->
+                                {ToAck, PA1} =
+                                    find_common(queue_from_pubs(Pubs), PA,
+                                                queue:new()),
+                                LA1 = last_ack(Acks, LA),
+                                AckNums = acks_from_queue(ToAck),
+                                Confirms3 = maybe_confirm(
+                                              Self, Id, Confirms2, AckNums),
+                                {Member #member { pending_ack = PA1,
+                                                  last_ack    = LA1 },
+                                 {Confirms3,
+                                  activity_cons(
+                                    Id, [], AckNums, Activity2)}};
+                            false ->
+                                PA1 = apply_acks(Acks, join_pubs(PA, Pubs)),
+                                LA1 = last_ack(Acks, LA),
+                                LP1 = last_pub(Pubs, LP),
+                                {Member #member { pending_ack = PA1,
+                                                  last_pub    = LP1,
+                                                  last_ack    = LA1 },
+                                 {Confirms2,
+                                  activity_cons(Id, Pubs, Acks, Activity2)}}
+                        end
+                end, Id, MembersStateConfirmsActivity)
+      end, {MembersState, {Confirms, activity_nil()}}, Activity).
+
 callback(Args, Module, Activity) ->
     Result =
       lists:foldl(
@@ -1530,3 +1586,24 @@ call(Pid, Msg, Timeout) -> gen_server2:call(Pid, Msg, Timeout).
 cast(Pid, Msg)          -> gen_server2:cast(Pid, Msg).
 monitor(Pid)            -> erlang:monitor(process, Pid).
 demonitor(MRef)         -> erlang:demonitor(MRef).
+
+check_membership(Self, #gm_group{members = M} = Group) ->
+    case lists:member(Self, M) of
+        true ->
+            Group;
+        false ->
+            throw(lost_membership)
+    end.
+
+check_membership(GroupName) ->
+    case dirty_read_group(GroupName) of
+        #gm_group{members = M} ->
+            case lists:keymember(self(), 2, M) of
+                true ->
+                    ok;
+                false ->
+                    throw(lost_membership)
+            end;
+        {error, not_found} ->
+            throw(lost_membership)
+    end.
index 4e78346febe94175acd0a7ba56c29e362d5a65d2..fc7157dff1048981ad22d7bc6f247d5749770833 100644 (file)
 
 -define(QUEUE, queue).
 
--ifdef(use_specs).
-
 -export_type([?MODULE/0]).
 
--opaque(?MODULE() :: {non_neg_integer(), ?QUEUE:?QUEUE()}).
--type(value()     :: any()).
--type(result()    :: 'empty' | {'value', value()}).
-
--spec(new/0       :: () -> ?MODULE()).
--spec(is_empty/1  :: (?MODULE()) -> boolean()).
--spec(len/1       :: (?MODULE()) -> non_neg_integer()).
--spec(in/2        :: (value(), ?MODULE()) -> ?MODULE()).
--spec(in_r/2      :: (value(), ?MODULE()) -> ?MODULE()).
--spec(out/1       :: (?MODULE()) -> {result(), ?MODULE()}).
--spec(out_r/1     :: (?MODULE()) -> {result(), ?MODULE()}).
--spec(join/2      :: (?MODULE(), ?MODULE()) -> ?MODULE()).
--spec(foldl/3     :: (fun ((value(), B) -> B), B, ?MODULE()) -> B).
--spec(foldr/3     :: (fun ((value(), B) -> B), B, ?MODULE()) -> B).
--spec(from_list/1 :: ([value()]) -> ?MODULE()).
--spec(to_list/1   :: (?MODULE()) -> [value()]).
--spec(peek/1      :: (?MODULE()) -> result()).
--spec(peek_r/1    :: (?MODULE()) -> result()).
-
--endif.
+-include_lib("rabbit_common/include/old_builtin_types.hrl").
+
+-opaque ?MODULE() :: {non_neg_integer(), ?QUEUE_TYPE()}.
+-type value()     :: any().
+-type result()    :: 'empty' | {'value', value()}.
+
+-spec new() -> ?MODULE().
+-spec is_empty(?MODULE()) -> boolean().
+-spec len(?MODULE()) -> non_neg_integer().
+-spec in(value(), ?MODULE()) -> ?MODULE().
+-spec in_r(value(), ?MODULE()) -> ?MODULE().
+-spec out(?MODULE()) -> {result(), ?MODULE()}.
+-spec out_r(?MODULE()) -> {result(), ?MODULE()}.
+-spec join(?MODULE(), ?MODULE()) -> ?MODULE().
+-spec foldl(fun ((value(), B) -> B), B, ?MODULE()) -> B.
+-spec foldr(fun ((value(), B) -> B), B, ?MODULE()) -> B.
+-spec from_list([value()]) -> ?MODULE().
+-spec to_list(?MODULE()) -> [value()].
+-spec peek(?MODULE()) -> result().
+-spec peek_r(?MODULE()) -> result().
 
 new() -> {0, ?QUEUE:new()}.
 
index 0b1126eb12e3270607b149e9697b38e0909a2875..8d5c94663556b0750bcf0d79f28ebee9136198f6 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(sync/0 :: () -> 'ok').
-
--endif.
+-spec sync() -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index 62fae2b077c918a93732763817ce5c1d779b5c20..e1f5219dcb0dd9ac4d116d8311da003a5ebe65cc 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
+-type name() :: term().
 
--type(name() :: term()).
+-spec start_link() -> {'ok', pid()} | {'error', any()}.
+-spec start() -> {'ok', pid()} | {'error', any()}.
+-spec join(name(), pid()) -> 'ok'.
+-spec leave(name(), pid()) -> 'ok'.
+-spec get_members(name()) -> [pid()].
+-spec in_group(name(), pid()) -> boolean().
 
--spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
--spec(start/0 :: () -> {'ok', pid()} | {'error', any()}).
--spec(join/2 :: (name(), pid()) -> 'ok').
--spec(leave/2 :: (name(), pid()) -> 'ok').
--spec(get_members/1 :: (name()) -> [pid()]).
--spec(in_group/2 :: (name(), pid()) -> boolean()).
-
--spec(sync/0 :: () -> 'ok').
-
--endif.
+-spec sync() -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index ddbb3f1bf1b240cb32147c2c66e7163d14d1c931..f795e3738379ff96a9b995a8b002015d2338cef1 100644 (file)
@@ -1,7 +1,7 @@
 {application, rabbit,           %% -*- erlang -*-
  [{description, "RabbitMQ"},
   {id, "RabbitMQ"},
-  {vsn, "3.6.1"},
+  {vsn, "3.6.5"},
   {modules, []},
   {registered, [rabbit_amqqueue_sup,
                 rabbit_log,
@@ -97,5 +97,6 @@
          %% see rabbitmq-server#143
          {credit_flow_default_credit, {200, 50}},
          %% see rabbitmq-server#248
-         {channel_operation_timeout, 5000}
+         %% and rabbitmq-server#667
+         {channel_operation_timeout, 15000}
         ]}]}.
index 81c7eee580103caecf797a2532aedc6cb6963d9a..a86fd97925a68e9f2fa54dea9f3f07322f488c72 100644 (file)
@@ -22,7 +22,7 @@
          stop_and_halt/0, await_startup/0, status/0, is_running/0,
          is_running/1, environment/0, rotate_logs/1, force_event_refresh/1,
          start_fhc/0]).
--export([start/2, stop/1]).
+-export([start/2, stop/1, prep_stop/1]).
 -export([start_apps/1, stop_apps/1]).
 -export([log_location/1, config_files/0]). %% for testing and mgmt-agent
 
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--type(file_suffix() :: binary()).
+-type file_suffix() :: binary().
 %% this really should be an abstract type
--type(log_location() :: 'tty' | 'undefined' | file:filename()).
--type(param() :: atom()).
--type(app_name() :: atom()).
-
--spec(start/0 :: () -> 'ok').
--spec(boot/0 :: () -> 'ok').
--spec(stop/0 :: () -> 'ok').
--spec(stop_and_halt/0 :: () -> no_return()).
--spec(await_startup/0 :: () -> 'ok').
--spec(status/0 ::
+-type log_location() :: 'tty' | 'undefined' | file:filename().
+-type param() :: atom().
+-type app_name() :: atom().
+
+-spec start() -> 'ok'.
+-spec boot() -> 'ok'.
+-spec stop() -> 'ok'.
+-spec stop_and_halt() -> no_return().
+-spec await_startup() -> 'ok'.
+-spec status
         () -> [{pid, integer()} |
                {running_applications, [{atom(), string(), string()}]} |
                {os, {atom(), atom()}} |
                {erlang_version, string()} |
-               {memory, any()}]).
--spec(is_running/0 :: () -> boolean()).
--spec(is_running/1 :: (node()) -> boolean()).
--spec(environment/0 :: () -> [{param(), term()}]).
--spec(rotate_logs/1 :: (file_suffix()) -> rabbit_types:ok_or_error(any())).
--spec(force_event_refresh/1 :: (reference()) -> 'ok').
-
--spec(log_location/1 :: ('sasl' | 'kernel') -> log_location()).
-
--spec(start/2 :: ('normal',[]) ->
-                     {'error',
-                      {'erlang_version_too_old',
-                        {'found',string(),string()},
-                        {'required',string(),string()}}} |
-                     {'ok',pid()}).
--spec(stop/1 :: (_) -> 'ok').
-
--spec(maybe_insert_default_data/0 :: () -> 'ok').
--spec(boot_delegate/0 :: () -> 'ok').
--spec(recover/0 :: () -> 'ok').
--spec(start_apps/1 :: ([app_name()]) -> 'ok').
--spec(stop_apps/1 :: ([app_name()]) -> 'ok').
-
--endif.
+               {memory, any()}].
+-spec is_running() -> boolean().
+-spec is_running(node()) -> boolean().
+-spec environment() -> [{param(), term()}].
+-spec rotate_logs(file_suffix()) -> rabbit_types:ok_or_error(any()).
+-spec force_event_refresh(reference()) -> 'ok'.
+
+-spec log_location('sasl' | 'kernel') -> log_location().
+
+-spec start('normal',[]) ->
+          {'error',
+           {'erlang_version_too_old',
+            {'found',string(),string()},
+            {'required',string(),string()}}} |
+          {'ok',pid()}.
+-spec stop(_) -> 'ok'.
+
+-spec maybe_insert_default_data() -> 'ok'.
+-spec boot_delegate() -> 'ok'.
+-spec recover() -> 'ok'.
+-spec start_apps([app_name()]) -> 'ok'.
+-spec stop_apps([app_name()]) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
@@ -284,16 +280,120 @@ broker_start() ->
     Plugins = rabbit_plugins:setup(),
     ToBeLoaded = Plugins ++ ?APPS,
     start_apps(ToBeLoaded),
-    case os:type() of
-        {win32, _} -> ok;
-        _ -> case code:load_file(sd_notify) of
-                 {module, sd_notify} -> SDNotify = sd_notify,
-                                        SDNotify:sd_notify(0, "READY=1");
-                 {error, _} -> os:cmd("systemd-notify --ready")
-             end
-    end,
+    maybe_sd_notify(),
     ok = log_broker_started(rabbit_plugins:active()).
 
+%% Try to send a systemd ready notification if it makes sense in the
+%% current environment. standard_error is used intentionally in all
+%% logging statements, so all these messages will end up in the
+%% systemd journal.
+maybe_sd_notify() ->
+    case sd_notify_ready() of
+        false ->
+            io:format(standard_error, "systemd READY notification failed, beware of timeouts~n", []);
+        _ ->
+            ok
+    end.
+
+sd_notify_ready() ->
+    case {os:type(), os:getenv("NOTIFY_SOCKET")} of
+        {{win32, _}, _} ->
+            true;
+        {_, [_|_]} -> %% Non-empty NOTIFY_SOCKET, give it a try
+            sd_notify_legacy() orelse sd_notify_socat();
+        _ ->
+            true
+    end.
+
+sd_notify_data() ->
+    "READY=1\nSTATUS=Initialized\nMAINPID=" ++ os:getpid() ++ "\n".
+
+sd_notify_legacy() ->
+    case code:load_file(sd_notify) of
+        {module, sd_notify} ->
+            SDNotify = sd_notify,
+            SDNotify:sd_notify(0, sd_notify_data()),
+            true;
+        {error, _} ->
+            false
+    end.
+
+%% socat(1) is the most portable way sd_notify could be
+%% implemented in Erlang without introducing a NIF. Currently the
+%% following issues prevent us from implementing it in a more
+%% reasonable way:
+%% - systemd-notify(1) is unstable for non-root users
+%% - Erlang doesn't support Unix domain sockets.
+%%
+%% Some details on how we ended with such a solution:
+%%   https://github.com/rabbitmq/rabbitmq-server/issues/664
+sd_notify_socat() ->
+    case sd_current_unit() of
+        {ok, Unit} ->
+            io:format(standard_error, "systemd unit for activation check: \"~s\"~n", [Unit]),
+            sd_notify_socat(Unit);
+        _ ->
+            false
+    end.
+
+socat_socket_arg("@" ++ AbstractUnixSocket) ->
+    "abstract-sendto:" ++ AbstractUnixSocket;
+socat_socket_arg(UnixSocket) ->
+    "unix-sendto:" ++ UnixSocket.
+
+sd_open_port() ->
+    open_port(
+      {spawn_executable, os:find_executable("socat")},
+      [{args, [socat_socket_arg(os:getenv("NOTIFY_SOCKET")), "STDIO"]},
+       use_stdio, out]).
+
+sd_notify_socat(Unit) ->
+    case sd_open_port() of
+        {'EXIT', Exit} ->
+            io:format(standard_error, "Failed to start socat ~p~n", [Exit]),
+            false;
+        Port ->
+            Port ! {self(), {command, sd_notify_data()}},
+            Result = sd_wait_activation(Port, Unit),
+            port_close(Port),
+            Result
+    end.
+
+sd_current_unit() ->
+    case catch re:run(os:cmd("systemctl status " ++ os:getpid()), "([-.@0-9a-zA-Z]+)", [unicode, {capture, all_but_first, list}]) of
+        {'EXIT', _} ->
+            error;
+        {match, [Unit]} ->
+            {ok, Unit};
+        _ ->
+            error
+    end.
+
+sd_wait_activation(Port, Unit) ->
+    case os:find_executable("systemctl") of
+        false ->
+            io:format(standard_error, "'systemctl' unavailable, falling back to sleep~n", []),
+            timer:sleep(5000),
+            true;
+        _ ->
+            sd_wait_activation(Port, Unit, 10)
+    end.
+
+sd_wait_activation(_, _, 0) ->
+    io:format(standard_error, "Service still in 'activating' state, bailing out~n", []),
+    false;
+sd_wait_activation(Port, Unit, AttemptsLeft) ->
+    case os:cmd("systemctl show --property=ActiveState " ++ Unit) of
+        "ActiveState=activating\n" ->
+            timer:sleep(1000),
+            sd_wait_activation(Port, Unit, AttemptsLeft - 1);
+        "ActiveState=" ++ _ ->
+            true;
+        _ = Err->
+            io:format(standard_error, "Unexpected status from systemd ~p~n", [Err]),
+            false
+    end.
+
 start_it(StartFun) ->
     Marker = spawn_link(fun() -> receive stop -> ok end end),
     case catch register(rabbit_boot, Marker) of
@@ -332,6 +432,10 @@ stop_and_halt() ->
         stop()
     after
         rabbit_log:info("Halting Erlang VM~n", []),
+        %% Also duplicate this information to stderr, so the console
+        %% where the foreground broker was running (or the systemd
+        %% journal) will contain information about graceful termination.
+        io:format(standard_error, "Gracefully halting Erlang VM~n", []),
         init:stop()
     end,
     ok.
@@ -381,6 +485,7 @@ await_startup(HaveSeenRabbitBoot) ->
 
 status() ->
     S1 = [{pid,                  list_to_integer(os:getpid())},
+          %% The timeout value used is twice that of gen_server:call/2.
           {running_applications, rabbit_misc:which_applications()},
           {os,                   os:type()},
           {erlang_version,       erlang:system_info(system_version)},
@@ -437,8 +542,9 @@ is_running() -> is_running(node()).
 is_running(Node) -> rabbit_nodes:is_process_running(Node, rabbit).
 
 environment() ->
+    %% The timeout value is twice that of gen_server:call/2.
     [{A, environment(A)} ||
-        {A, _, _} <- lists:keysort(1, application:which_applications())].
+        {A, _, _} <- lists:keysort(1, application:which_applications(10000))].
 
 environment(App) ->
     Ignore = [default_pass, included_applications],
@@ -480,17 +586,18 @@ start(normal, []) ->
             Error
     end.
 
-stop(_State) ->
+prep_stop(_State) ->
     ok = rabbit_alarm:stop(),
     ok = case rabbit_mnesia:is_clustered() of
-             true  -> rabbit_amqqueue:on_node_down(node());
+             true  -> ok;
              false -> rabbit_table:clear_ram_only_tables()
          end,
     ok.
 
--ifdef(use_specs).
--spec(boot_error/2 :: (term(), not_available | [tuple()]) -> no_return()).
--endif.
+stop(_) -> ok.
+
+-spec boot_error(term(), not_available | [tuple()]) -> no_return().
+
 boot_error({could_not_start, rabbit, {{timeout_waiting_for_tables, _}, _}},
            _Stacktrace) ->
     AllNodes = rabbit_mnesia:cluster_nodes(all),
@@ -518,10 +625,9 @@ boot_error(Reason, Stacktrace) ->
     Args = [Reason, log_location(kernel), log_location(sasl)],
     boot_error(Reason, Fmt, Args, Stacktrace).
 
--ifdef(use_specs).
--spec(boot_error/4 :: (term(), string(), [any()], not_available | [tuple()])
-                      -> no_return()).
--endif.
+-spec boot_error(term(), string(), [any()], not_available | [tuple()]) ->
+          no_return().
+
 boot_error(Reason, Fmt, Args, not_available) ->
     log_boot_error_and_exit(Reason, Fmt, Args);
 boot_error(Reason, Fmt, Args, Stacktrace) ->
@@ -693,7 +799,8 @@ print_banner() ->
               "~n  ##########  Logs: ~s"
               "~n  ######  ##        ~s"
               "~n  ##########"
-              "~n              Starting broker...",
+              "~n              Starting broker..."
+              "~n",
               [Product, Version, ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE,
                log_location(kernel), log_location(sasl)]).
 
@@ -722,11 +829,16 @@ log_banner() ->
     rabbit_log:info("~s", [Banner]).
 
 warn_if_kernel_config_dubious() ->
-    case erlang:system_info(kernel_poll) of
-        true  -> ok;
-        false -> rabbit_log:warning(
-                   "Kernel poll (epoll, kqueue, etc) is disabled. Throughput "
-                   "and CPU utilization may worsen.~n")
+    case os:type() of
+        {win32, _} ->
+            ok;
+        _ ->
+            case erlang:system_info(kernel_poll) of
+                true  -> ok;
+                false -> rabbit_log:warning(
+                           "Kernel poll (epoll, kqueue, etc) is disabled. Throughput "
+                           "and CPU utilization may worsen.~n")
+            end
     end,
     AsyncThreads = erlang:system_info(thread_pool_size),
     case AsyncThreads < ?ASYNC_THREADS_WARNING_THRESHOLD of
index dae4d4732ac183ce8522f87a96fe7d87d76ae368..3ae7d7f6906da194967c4889c6d4cfea3b0abeff 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([permission_atom/0]).
 
--type(permission_atom() :: 'configure' | 'read' | 'write').
-
--spec(check_user_pass_login/2 ::
-        (rabbit_types:username(), rabbit_types:password())
-        -> {'ok', rabbit_types:user()} |
-           {'refused', rabbit_types:username(), string(), [any()]}).
--spec(check_user_login/2 ::
-        (rabbit_types:username(), [{atom(), any()}])
-        -> {'ok', rabbit_types:user()} |
-           {'refused', rabbit_types:username(), string(), [any()]}).
--spec(check_user_loopback/2 :: (rabbit_types:username(),
-                                rabbit_net:socket() | inet:ip_address())
-        -> 'ok' | 'not_allowed').
--spec(check_vhost_access/3 ::
-        (rabbit_types:user(), rabbit_types:vhost(), rabbit_net:socket() | #authz_socket_info{})
-        -> 'ok' | rabbit_types:channel_exit()).
--spec(check_resource_access/3 ::
-        (rabbit_types:user(), rabbit_types:r(atom()), permission_atom())
-        -> 'ok' | rabbit_types:channel_exit()).
-
--endif.
+-type permission_atom() :: 'configure' | 'read' | 'write'.
+
+-spec check_user_pass_login
+        (rabbit_types:username(), rabbit_types:password()) ->
+            {'ok', rabbit_types:user()} |
+            {'refused', rabbit_types:username(), string(), [any()]}.
+-spec check_user_login
+        (rabbit_types:username(), [{atom(), any()}]) ->
+            {'ok', rabbit_types:user()} |
+            {'refused', rabbit_types:username(), string(), [any()]}.
+-spec check_user_loopback
+        (rabbit_types:username(), rabbit_net:socket() | inet:ip_address()) ->
+            'ok' | 'not_allowed'.
+-spec check_vhost_access
+        (rabbit_types:user(), rabbit_types:vhost(),
+         rabbit_net:socket() | #authz_socket_info{}) ->
+            'ok' | rabbit_types:channel_exit().
+-spec check_resource_access
+        (rabbit_types:user(), rabbit_types:r(atom()), permission_atom()) ->
+            'ok' | rabbit_types:channel_exit().
 
 %%----------------------------------------------------------------------------
 
index 30743ea24376ebde30afd715da60664e65b79ce8..dd64c6f1c8f8060c00d34d523e687619dd24c93d 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -record(alarms, {alertees :: dict:dict(pid(), rabbit_types:mfargs()),
                  alarmed_nodes :: dict:dict(node(), [resource_alarm_source()]),
                  alarms :: [alarm()]}).
 
--type(local_alarm() :: 'file_descriptor_limit').
--type(resource_alarm_source() :: 'disk' | 'memory').
--type(resource_alarm() :: {resource_limit, resource_alarm_source(), node()}).
--type(alarm() :: local_alarm() | resource_alarm()).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start/0 :: () -> 'ok').
--spec(stop/0 :: () -> 'ok').
--spec(register/2 :: (pid(), rabbit_types:mfargs()) -> [atom()]).
--spec(set_alarm/1 :: ({alarm(), []}) -> 'ok').
--spec(clear_alarm/1 :: (alarm()) -> 'ok').
--spec(on_node_up/1 :: (node()) -> 'ok').
--spec(on_node_down/1 :: (node()) -> 'ok').
--spec(get_alarms/0 :: () -> [{alarm(), []}]).
-
--else.
-
--record(alarms, {alertees, alarmed_nodes, alarms}).
-
--endif.
+-type local_alarm() :: 'file_descriptor_limit'.
+-type resource_alarm_source() :: 'disk' | 'memory'.
+-type resource_alarm() :: {resource_limit, resource_alarm_source(), node()}.
+-type alarm() :: local_alarm() | resource_alarm().
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec start() -> 'ok'.
+-spec stop() -> 'ok'.
+-spec register(pid(), rabbit_types:mfargs()) -> [atom()].
+-spec set_alarm({alarm(), []}) -> 'ok'.
+-spec clear_alarm(alarm()) -> 'ok'.
+-spec on_node_up(node()) -> 'ok'.
+-spec on_node_down(node()) -> 'ok'.
+-spec get_alarms() -> [{alarm(), []}].
 
 %%----------------------------------------------------------------------------
 
index 1a86851d0ac2170ecc575de6c7b42e645c0e7350..66df42987cd70196df139ec48a2080a0c2791c0b 100644 (file)
          prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
 
 %% Queue's state
--record(q, {q,
+-record(q, {
+            %% an #amqqueue record
+            q,
+            %% none | {exclusive consumer channel PID, consumer tag}
             exclusive_consumer,
+            %% Set to true if a queue has ever had a consumer.
+            %% This is used to determine when to delete auto-delete queues.
             has_had_consumers,
+            %% backing queue module.
+            %% for mirrored queues, this will be rabbit_mirror_queue_master.
+            %% for non-priority and non-mirrored queues, rabbit_variable_queue.
+            %% see rabbit_backing_queue.
             backing_queue,
+            %% backing queue state.
+            %% see rabbit_backing_queue, rabbit_variable_queue.
             backing_queue_state,
+            %% consumers state, see rabbit_queue_consumers
             consumers,
+            %% queue expiration value
             expires,
+            %% timer used to periodically sync (flush) queue index
             sync_timer_ref,
+            %% timer used to update ingress/egress rates and queue RAM duration target
             rate_timer_ref,
+            %% timer used to clean up this queue due to TTL (on when unused)
             expiry_timer_ref,
+            %% stats emission timer
             stats_timer,
+            %% maps message IDs to {channel pid, MsgSeqNo}
+            %% pairs
             msg_id_to_channel,
+            %% message TTL value
             ttl,
+            %% timer used to delete expired messages
             ttl_timer_ref,
             ttl_timer_expiry,
+            %% Keeps track of channels that publish to this queue.
+            %% When channel process goes down, queues have to perform
+            %% certain cleanup.
             senders,
+            %% dead letter exchange as a #resource record, if any
             dlx,
             dlx_routing_key,
+            %% max length in messages, if configured
             max_length,
+            %% max length in bytes, if configured
             max_bytes,
+            %% when policies change, this version helps queue
+            %% determine what previously scheduled/set up state to ignore,
+            %% e.g. message expiration messages from previously set up timers
+            %% that may or may not be still valid
             args_policy_version,
+            %% running | flow | idle
             status
            }).
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(init_with_backing_queue_state/7 ::
+-spec info_keys() -> rabbit_types:info_keys().
+-spec init_with_backing_queue_state
         (rabbit_types:amqqueue(), atom(), tuple(), any(),
-         [rabbit_types:delivery()], pmon:pmon(), dict:dict()) -> #q{}).
-
--endif.
+         [rabbit_types:delivery()], pmon:pmon(), ?DICT_TYPE()) ->
+            #q{}.
 
 %%----------------------------------------------------------------------------
 
          slave_pids,
          synchronised_slave_pids,
          recoverable_slaves,
-         state
+         state,
+         reductions,
+         garbage_collection
         ]).
 
 -define(CREATION_EVENT_KEYS,
@@ -893,6 +924,11 @@ i(recoverable_slaves, #q{q = #amqqueue{name    = Name,
     end;
 i(state, #q{status = running}) -> credit_flow:state();
 i(state, #q{status = State})   -> State;
+i(garbage_collection, _State) ->
+    rabbit_misc:get_gc_info(self());
+i(reductions, _State) ->
+    {reductions, Reductions} = erlang:process_info(self(), reductions),
+    Reductions;
 i(Item, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
     BQ:info(Item, BQS).
 
index 6cc4f45b147a9169245210b1e871793426fcda62..f1e770aa455a6bdc05ed2deebf92780a973a93c3 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/2 :: (rabbit_types:amqqueue(), rabbit_prequeue:start_mode()) ->
-                           {'ok', pid(), pid()}).
-
--endif.
+-spec start_link(rabbit_types:amqqueue(), rabbit_prequeue:start_mode()) ->
+          {'ok', pid(), pid()}.
 
 %%----------------------------------------------------------------------------
 
@@ -39,7 +35,7 @@ start_link(Q, StartMode) ->
     Marker = spawn_link(fun() -> receive stop -> ok end end),
     ChildSpec = {rabbit_amqqueue,
                  {rabbit_prequeue, start_link, [Q, StartMode, Marker]},
-                 intrinsic, ?MAX_WAIT, worker, [rabbit_amqqueue_process,
+                 intrinsic, ?WORKER_WAIT, worker, [rabbit_amqqueue_process,
                                                 rabbit_mirror_queue_slave]},
     {ok, SupPid} = supervisor2:start_link(?MODULE, []),
     {ok, QPid} = supervisor2:start_child(SupPid, ChildSpec),
index bb89eace7873bf7e1d9a94dd17531d91ba498e19..c57d9334e26c99af25b518198b9a5240ee3da049 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_queue_process/3 :: (node(), rabbit_types:amqqueue(),
-                               'declare' | 'recovery' | 'slave') -> pid()).
-
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec start_queue_process
+        (node(), rabbit_types:amqqueue(), 'declare' | 'recovery' | 'slave') ->
+            pid().
 
 %%----------------------------------------------------------------------------
 
@@ -49,4 +46,4 @@ start_queue_process(Node, Q, StartMode) ->
 init([]) ->
     {ok, {{simple_one_for_one, 10, 10},
           [{rabbit_amqqueue_sup, {rabbit_amqqueue_sup, start_link, []},
-            temporary, ?MAX_WAIT, supervisor, [rabbit_amqqueue_sup]}]}}.
+            temporary, ?SUPERVISOR_WAIT, supervisor, [rabbit_amqqueue_sup]}]}}.
index 299e254c509ae56c4ca46cc16abe4e634bcc9dd7..51bc883976f76d48e85b13011b5b674db3d9602c 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([key/0, deletions/0]).
 
--type(key() :: binary()).
+-type key() :: binary().
 
--type(bind_errors() :: rabbit_types:error(
+-type bind_errors() :: rabbit_types:error(
                          {'resources_missing',
                           [{'not_found', (rabbit_types:binding_source() |
                                           rabbit_types:binding_destination())} |
-                           {'absent', rabbit_types:amqqueue()}]})).
+                           {'absent', rabbit_types:amqqueue()}]}).
 
--type(bind_ok_or_error() :: 'ok' | bind_errors() |
+-type bind_ok_or_error() :: 'ok' | bind_errors() |
                             rabbit_types:error(
                               'binding_not_found' |
-                              {'binding_invalid', string(), [any()]})).
--type(bind_res() :: bind_ok_or_error() | rabbit_misc:thunk(bind_ok_or_error())).
--type(inner_fun() ::
+                              {'binding_invalid', string(), [any()]}).
+-type bind_res() :: bind_ok_or_error() | rabbit_misc:thunk(bind_ok_or_error()).
+-type inner_fun() ::
         fun((rabbit_types:exchange(),
              rabbit_types:exchange() | rabbit_types:amqqueue()) ->
-                   rabbit_types:ok_or_error(rabbit_types:amqp_error()))).
--type(bindings() :: [rabbit_types:binding()]).
+                   rabbit_types:ok_or_error(rabbit_types:amqp_error())).
+-type bindings() :: [rabbit_types:binding()].
 
 %% TODO this should really be opaque but that seems to confuse 17.1's
 %% dialyzer into objecting to everything that uses it.
--type(deletions() :: dict:dict()).
-
--spec(recover/2 :: ([rabbit_exchange:name()], [rabbit_amqqueue:name()]) ->
-                        'ok').
--spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()).
--spec(add/1    :: (rabbit_types:binding())              -> bind_res()).
--spec(add/2    :: (rabbit_types:binding(), inner_fun()) -> bind_res()).
--spec(remove/1 :: (rabbit_types:binding())              -> bind_res()).
--spec(remove/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()).
--spec(list/1 :: (rabbit_types:vhost()) -> bindings()).
--spec(list_for_source/1 ::
-        (rabbit_types:binding_source()) -> bindings()).
--spec(list_for_destination/1 ::
-        (rabbit_types:binding_destination()) -> bindings()).
--spec(list_for_source_and_destination/2 ::
+-type deletions() :: ?DICT_TYPE().
+
+-spec recover([rabbit_exchange:name()], [rabbit_amqqueue:name()]) ->
+                        'ok'.
+-spec exists(rabbit_types:binding()) -> boolean() | bind_errors().
+-spec add(rabbit_types:binding())              -> bind_res().
+-spec add(rabbit_types:binding(), inner_fun()) -> bind_res().
+-spec remove(rabbit_types:binding())              -> bind_res().
+-spec remove(rabbit_types:binding(), inner_fun()) -> bind_res().
+-spec list(rabbit_types:vhost()) -> bindings().
+-spec list_for_source
+        (rabbit_types:binding_source()) -> bindings().
+-spec list_for_destination
+        (rabbit_types:binding_destination()) -> bindings().
+-spec list_for_source_and_destination
         (rabbit_types:binding_source(), rabbit_types:binding_destination()) ->
-                                                bindings()).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (rabbit_types:binding()) -> rabbit_types:infos()).
--spec(info/2 :: (rabbit_types:binding(), rabbit_types:info_keys()) ->
-                     rabbit_types:infos()).
--spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys())
-                   -> [rabbit_types:infos()]).
--spec(info_all/4 ::(rabbit_types:vhost(), rabbit_types:info_keys(),
-                    reference(), pid()) -> 'ok').
--spec(has_for_source/1 :: (rabbit_types:binding_source()) -> boolean()).
--spec(remove_for_source/1 :: (rabbit_types:binding_source()) -> bindings()).
--spec(remove_for_destination/2 ::
-        (rabbit_types:binding_destination(), boolean()) -> deletions()).
--spec(remove_transient_for_destination/1 ::
-        (rabbit_types:binding_destination()) -> deletions()).
--spec(process_deletions/1 :: (deletions()) -> rabbit_misc:thunk('ok')).
--spec(combine_deletions/2 :: (deletions(), deletions()) -> deletions()).
--spec(add_deletion/3 :: (rabbit_exchange:name(),
-                         {'undefined' | rabbit_types:exchange(),
-                          'deleted' | 'not_deleted',
-                          bindings()}, deletions()) -> deletions()).
--spec(new_deletions/0 :: () -> deletions()).
-
--endif.
+                                                bindings().
+-spec info_keys() -> rabbit_types:info_keys().
+-spec info(rabbit_types:binding()) -> rabbit_types:infos().
+-spec info(rabbit_types:binding(), rabbit_types:info_keys()) ->
+          rabbit_types:infos().
+-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys()) ->
+          [rabbit_types:infos()].
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys(),
+                    reference(), pid()) -> 'ok'.
+-spec has_for_source(rabbit_types:binding_source()) -> boolean().
+-spec remove_for_source(rabbit_types:binding_source()) -> bindings().
+-spec remove_for_destination
+        (rabbit_types:binding_destination(), boolean()) -> deletions().
+-spec remove_transient_for_destination
+        (rabbit_types:binding_destination()) -> deletions().
+-spec process_deletions(deletions()) -> rabbit_misc:thunk('ok').
+-spec combine_deletions(deletions(), deletions()) -> deletions().
+-spec add_deletion
+        (rabbit_exchange:name(),
+         {'undefined' | rabbit_types:exchange(),
+          'deleted' | 'not_deleted',
+          bindings()},
+         deletions()) ->
+            deletions().
+-spec new_deletions() -> deletions().
 
 %%----------------------------------------------------------------------------
 
 -define(INFO_KEYS, [source_name, source_kind,
                     destination_name, destination_kind,
-                    routing_key, arguments]).
+                    routing_key, arguments,
+                    vhost]).
 
 recover(XNames, QNames) ->
     rabbit_misc:table_filter(
@@ -272,6 +272,7 @@ infos(Items, B) -> [{Item, i(Item, B)} || Item <- Items].
 
 i(source_name,      #binding{source      = SrcName})    -> SrcName#resource.name;
 i(source_kind,      #binding{source      = SrcName})    -> SrcName#resource.kind;
+i(vhost,            #binding{source      = SrcName})    -> SrcName#resource.virtual_host;
 i(destination_name, #binding{destination = DstName})    -> DstName#resource.name;
 i(destination_kind, #binding{destination = DstName})    -> DstName#resource.kind;
 i(routing_key,      #binding{key         = RoutingKey}) -> RoutingKey;
index 7aa369b6cac91888027711635be418c25fe68007..48cc1e15c2a2a31c4631ee1cb45e703b6f3fa71d 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([start_link_args/0]).
 
--type(start_link_args() ::
+-type start_link_args() ::
         {'tcp', rabbit_net:socket(), rabbit_channel:channel_number(),
          non_neg_integer(), pid(), string(), rabbit_types:protocol(),
          rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
          pid()} |
         {'direct', rabbit_channel:channel_number(), pid(), string(),
          rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
-         rabbit_framing:amqp_table(), pid()}).
-
--spec(start_link/1 :: (start_link_args()) -> {'ok', pid(), {pid(), any()}}).
+         rabbit_framing:amqp_table(), pid()}.
 
--endif.
+-spec start_link(start_link_args()) -> {'ok', pid(), {pid(), any()}}.
 
 -define(FAIR_WAIT, 70000).
 
index bf483da713414188d78f59d9be9ecbee932aa8d5..885d34d0a708ce72dd8d724753407dc4e9654175 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_channel/2 :: (pid(), rabbit_channel_sup:start_link_args()) ->
-                              {'ok', pid(), {pid(), any()}}).
-
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec start_channel(pid(), rabbit_channel_sup:start_link_args()) ->
+          {'ok', pid(), {pid(), any()}}.
 
 %%----------------------------------------------------------------------------
 
index 4aad3c091656880b66be4f46d949ebbff4a35725..6b3548221747797be019c7739501a68064250deb 100644 (file)
 -include("rabbit_cli.hrl").
 
 -export([main/3, start_distribution/0, start_distribution/1,
-         parse_arguments/4, rpc_call/4, rpc_call/5, rpc_call/7]).
+         parse_arguments/4, filter_opts/2,
+         rpc_call/4, rpc_call/5, rpc_call/7]).
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
+-type option_name() :: string().
+-type option_value() :: string() | node() | boolean().
+-type optdef() :: flag | {option, string()}.
+-type parse_result() :: {'ok', {atom(), [{option_name(), option_value()}], [string()]}} |
+                        'no_command'.
 
--type(optdef() :: flag | {option, string()}).
--type(parse_result() :: {'ok', {atom(), [{string(), string()}], [string()]}} |
-                        'no_command').
-
-
--spec(main/3 :: (fun (([string()], string()) -> parse_result()),
-                     fun ((atom(), atom(), [any()], [any()]) -> any()),
-                         atom()) -> no_return()).
--spec(start_distribution/0 :: () -> {'ok', pid()} | {'error', any()}).
--spec(start_distribution/1 :: (string()) -> {'ok', pid()} | {'error', any()}).
--spec(usage/1 :: (atom()) -> no_return()).
--spec(parse_arguments/4 ::
+-spec main
+        (fun (([string()], string()) -> parse_result()),
+         fun ((atom(), atom(), [any()], [any()]) -> any()),
+         atom()) ->
+            no_return().
+-spec start_distribution() -> {'ok', pid()} | {'error', any()}.
+-spec start_distribution(string()) -> {'ok', pid()} | {'error', any()}.
+-spec usage(atom()) -> no_return().
+-spec parse_arguments
         ([{atom(), [{string(), optdef()}]} | atom()],
-         [{string(), optdef()}], string(), [string()]) -> parse_result()).
--spec(rpc_call/4 :: (node(), atom(), atom(), [any()]) -> any()).
--spec(rpc_call/5 :: (node(), atom(), atom(), [any()], number()) -> any()).
--spec(rpc_call/7 :: (node(), atom(), atom(), [any()], reference(), pid(),
-                     number()) -> any()).
+         [{string(), optdef()}], string(), [string()]) ->
+          parse_result().
+
+-spec filter_opts([{option_name(), option_value()}], [option_name()]) ->
+          [boolean()].
 
--endif.
+-spec rpc_call(node(), atom(), atom(), [any()]) -> any().
+-spec rpc_call(node(), atom(), atom(), [any()], number()) -> any().
+-spec rpc_call
+        (node(), atom(), atom(), [any()], reference(), pid(), number()) ->
+            any().
 
 ensure_cli_distribution() ->
     case start_distribution() of
@@ -117,7 +123,10 @@ main(ParseFun, DoFun, UsageMod) ->
                 _ ->
                     print_error("unable to connect to node ~w: ~w", [Node, Reason]),
                     print_badrpc_diagnostics([Node]),
-                    rabbit_misc:quit(?EX_UNAVAILABLE)
+                    case Command of
+                        stop -> rabbit_misc:quit(?EX_OK);
+                        _    -> rabbit_misc:quit(?EX_UNAVAILABLE)
+                    end
             end;
         {badrpc_multi, Reason, Nodes} ->
             print_error("unable to connect to nodes ~p: ~w", [Nodes, Reason]),
@@ -138,7 +147,7 @@ main(ParseFun, DoFun, UsageMod) ->
 start_distribution_anon(0, LastError) ->
     {error, LastError};
 start_distribution_anon(TriesLeft, _) ->
-    NameCandidate = list_to_atom(rabbit_misc:format("rabbitmq-cli-~2..0b", [rabbit_misc:random(100)])),
+    NameCandidate = list_to_atom(rabbit_misc:format("rabbitmq-cli-~2..0b", [rand_compat:uniform(100)])),
     case net_kernel:start([NameCandidate, name_type()]) of
         {ok, _} = Result ->
             Result;
@@ -241,6 +250,22 @@ process_opts(Defs, C, [A | As], Found, KVs, Outs) ->
         {none, _, _}     -> no_command
     end.
 
+%% When we have a set of flags that are used for filtering, we want by
+%% default to include every such option in our output. But if the user
+%% has explicitly specified any such flag, we include only the items
+%% that they have requested.
+filter_opts(CurrentOptionValues, AllOptionNames) ->
+    Explicit = lists:map(fun(OptName) ->
+                                 proplists:get_bool(OptName, CurrentOptionValues)
+                         end,
+                         AllOptionNames),
+    case lists:member(true, Explicit) of
+        true ->
+            Explicit;
+        false ->
+            lists:duplicate(length(AllOptionNames), true)
+    end.
+
 %%----------------------------------------------------------------------------
 
 fmt_stderr(Format, Args) -> rabbit_misc:format_stderr(Format ++ "~n", Args).
@@ -255,14 +280,10 @@ print_badrpc_diagnostics(Nodes) ->
 %% a timeout unless we set our ticktime to be the same. So let's do
 %% that.
 rpc_call(Node, Mod, Fun, Args) ->
-    rpc_call(Node, Mod, Fun, Args, ?RPC_TIMEOUT).
+    rabbit_misc:rpc_call(Node, Mod, Fun, Args).
 
 rpc_call(Node, Mod, Fun, Args, Timeout) ->
-    case rpc:call(Node, net_kernel, get_net_ticktime, [], Timeout) of
-        {badrpc, _} = E -> E;
-        Time            -> net_kernel:set_net_ticktime(Time, 0),
-                           rpc:call(Node, Mod, Fun, Args, Timeout)
-    end.
+    rabbit_misc:rpc_call(Node, Mod, Fun, Args, Timeout).
 
 rpc_call(Node, Mod, Fun, Args, Ref, Pid, Timeout) ->
-    rpc_call(Node, Mod, Fun, Args++[Ref, Pid], Timeout).
+    rabbit_misc:rpc_call(Node, Mod, Fun, Args, Ref, Pid, Timeout).
index 5ca0cad5ae7c8e808aa13989797a5cfcc1d10166..77f0bcb9938636945559976edd5f2909948bb15d 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/1 :: (rabbit_types:mfargs()) ->
-                           rabbit_types:ok_pid_or_error()).
--spec(start_link/2 :: ({'local', atom()}, rabbit_types:mfargs()) ->
-                           rabbit_types:ok_pid_or_error()).
--spec(start_link_worker/2 :: ({'local', atom()}, rabbit_types:mfargs()) ->
-                                  rabbit_types:ok_pid_or_error()).
-
--endif.
+-spec start_link(rabbit_types:mfargs()) ->
+          rabbit_types:ok_pid_or_error().
+-spec start_link({'local', atom()}, rabbit_types:mfargs()) ->
+          rabbit_types:ok_pid_or_error().
+-spec start_link_worker({'local', atom()}, rabbit_types:mfargs()) ->
+          rabbit_types:ok_pid_or_error().
 
 %%----------------------------------------------------------------------------
 
@@ -53,5 +49,4 @@ init({M,F,A}) ->
           [{client, {M,F,A}, temporary, infinity, supervisor, [M]}]}};
 init({{M,F,A}, worker}) ->
     {ok, {{simple_one_for_one, 0, 1},
-          [{client, {M,F,A}, temporary, ?MAX_WAIT, worker, [M]}]}}.
-
+          [{client, {M,F,A}, temporary, ?WORKER_WAIT, worker, [M]}]}}.
index d89bc3d75398aaf9ae54a341ba413f1d3f9b6a90..bde520b74b8c2b4017f071c7de6526ea454d3d43 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_channel_sup_sup/1 :: (pid()) -> rabbit_types:ok_pid_or_error()).
--spec(start_queue_collector/2 :: (pid(), rabbit_types:proc_name()) ->
-                                      rabbit_types:ok_pid_or_error()).
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec start_channel_sup_sup(pid()) -> rabbit_types:ok_pid_or_error().
+-spec start_queue_collector(pid(), rabbit_types:proc_name()) ->
+          rabbit_types:ok_pid_or_error().
 
 %%----------------------------------------------------------------------------
 
@@ -59,10 +57,9 @@ start_queue_collector(SupPid, Identity) ->
     supervisor2:start_child(
       SupPid,
       {collector, {rabbit_queue_collector, start_link, [Identity]},
-       intrinsic, ?MAX_WAIT, worker, [rabbit_queue_collector]}).
+       intrinsic, ?WORKER_WAIT, worker, [rabbit_queue_collector]}).
 
 %%----------------------------------------------------------------------------
 
 init([]) ->
     {ok, {{one_for_one, 10, 10}, []}}.
-
index bacdf3992642d80fad21dc8939f452cb719b0a28..154bbb1922451680885b3a69e0f03d203001a38b 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/4 :: (any(), rabbit_net:socket(), module(), any()) -> {'ok', pid(), pid()}).
--spec(reader/1 :: (pid()) -> pid()).
-
--endif.
+-spec start_link(any(), rabbit_net:socket(), module(), any()) ->
+          {'ok', pid(), pid()}.
+-spec reader(pid()) -> pid().
 
 %%--------------------------------------------------------------------------
 
@@ -66,7 +63,7 @@ start_link(Ref, Sock, _Transport, _Opts) ->
         supervisor2:start_child(
           SupPid,
           {reader, {rabbit_reader, start_link, [HelperSup, Ref, Sock]},
-           intrinsic, ?MAX_WAIT, worker, [rabbit_reader]}),
+           intrinsic, ?WORKER_WAIT, worker, [rabbit_reader]}),
     {ok, SupPid, ReaderPid}.
 
 reader(Pid) ->
index 4cb7eb094b0ef059fe8dd07204252fc3945737fc..ea9d6a2030dc54aee0d7bb991493b5dc10ac327a 100644 (file)
 -module(rabbit_control_main).
 -include("rabbit.hrl").
 -include("rabbit_cli.hrl").
+-include("rabbit_misc.hrl").
 
 -export([start/0, stop/0, parse_arguments/2, action/5, action/6,
          sync_queue/1, cancel_sync_queue/1, become/1,
          purge_queue/1]).
 
--import(rabbit_cli, [rpc_call/4, rpc_call/5, rpc_call/7]).
+-import(rabbit_misc, [rpc_call/4, rpc_call/5, rpc_call/7]).
 
 -define(EXTERNAL_CHECK_INTERVAL, 1000).
 
@@ -36,6 +37,7 @@
          reset,
          force_reset,
          rotate_logs,
+         hipe_compile,
 
          {join_cluster, [?RAM_DEF]},
          change_cluster_node_type,
@@ -72,7 +74,7 @@
          {clear_policy, [?VHOST_DEF]},
          {list_policies, [?VHOST_DEF]},
 
-         {list_queues, [?VHOST_DEF]},
+         {list_queues, [?VHOST_DEF, ?OFFLINE_DEF, ?ONLINE_DEF]},
          {list_exchanges, [?VHOST_DEF]},
          {list_bindings, [?VHOST_DEF]},
          {list_connections, [?VHOST_DEF]},
@@ -83,6 +85,7 @@
          report,
          set_cluster_name,
          eval,
+         node_health_check,
 
          close_connection,
          {trace_on, [?VHOST_DEF]},
         [stop, stop_app, start_app, wait, reset, force_reset, rotate_logs,
          join_cluster, change_cluster_node_type, update_cluster_nodes,
          forget_cluster_node, rename_cluster_node, cluster_status, status,
-         environment, eval, force_boot, help]).
+         environment, eval, force_boot, help, hipe_compile]).
 
+%% [Command | {Command, DefaultTimeoutInMilliSeconds}]
 -define(COMMANDS_WITH_TIMEOUT,
         [list_user_permissions, list_policies, list_queues, list_exchanges,
          list_bindings, list_connections, list_channels, list_consumers,
          list_vhosts, list_parameters,
-         purge_queue]).
+         purge_queue,
+         {node_health_check, 70000}]).
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start/0 :: () -> no_return()).
--spec(stop/0 :: () -> 'ok').
--spec(action/5 ::
+-spec start() -> no_return().
+-spec stop() -> 'ok'.
+-spec action
         (atom(), node(), [string()], [{string(), any()}],
-         fun ((string(), [any()]) -> 'ok'))
-        -> 'ok').
+         fun ((string(), [any()]) -> 'ok')) ->
+            'ok'.
 
--spec(action/6 ::
+-spec action
         (atom(), node(), [string()], [{string(), any()}],
-         fun ((string(), [any()]) -> 'ok'), timeout())
-        -> 'ok').
-
--endif.
+         fun ((string(), [any()]) -> 'ok'), timeout()) ->
+            'ok'.
 
 %%----------------------------------------------------------------------------
 
@@ -153,7 +154,7 @@ start() ->
                                     end
                        end,
               try
-                  T = case get_timeout(Opts) of
+                  T = case get_timeout(Command, Opts) of
                           {ok, Timeout} ->
                               Timeout;
                           {error, _} ->
@@ -188,8 +189,23 @@ print_report0(Node, {Module, InfoFun, KeysFun}, VHostArg) ->
     end,
     io:nl().
 
-get_timeout(Opts) ->
-    parse_timeout(proplists:get_value(?TIMEOUT_OPT, Opts, ?RPC_TIMEOUT)).
+get_timeout(Command, Opts) ->
+    Default = case proplists:lookup(Command, ?COMMANDS_WITH_TIMEOUT) of
+                  none ->
+                      infinity;
+                  {Command, true} ->
+                      ?RPC_TIMEOUT;
+                  {Command, D} ->
+                      D
+              end,
+    Result = case proplists:get_value(?TIMEOUT_OPT, Opts, Default) of
+        use_default ->
+            parse_timeout(Default);
+        Value ->
+            parse_timeout(Value)
+    end,
+    Result.
+
 
 parse_number(N) when is_list(N) ->
     try list_to_integer(N) of
@@ -235,11 +251,11 @@ do_action(Command, Node, Args, Opts, Inform, Timeout) ->
         false ->
             case ensure_app_running(Node) of
                 ok ->
-                    case lists:member(Command, ?COMMANDS_WITH_TIMEOUT) of
-                        true  ->
+                    case proplists:lookup(Command, ?COMMANDS_WITH_TIMEOUT) of
+                        {Command, _}  ->
                             announce_timeout(Timeout, Inform),
                             action(Command, Node, Args, Opts, Inform, Timeout);
-                        false ->
+                        none ->
                             action(Command, Node, Args, Opts, Inform)
                     end;
                 E  -> E
@@ -381,6 +397,16 @@ action(rotate_logs, Node, Args = [Suffix], _Opts, Inform) ->
     Inform("Rotating logs to files with suffix \"~s\"", [Suffix]),
     call(Node, {rabbit, rotate_logs, Args});
 
+action(hipe_compile, _Node, [TargetDir], _Opts, _Inform) ->
+    ok = application:load(rabbit),
+    case rabbit_hipe:can_hipe_compile() of
+        true ->
+            {ok, _, _} = rabbit_hipe:compile_to_directory(TargetDir),
+            ok;
+        false ->
+            {error, "HiPE compilation is not supported"}
+    end;
+
 action(close_connection, Node, [PidStr, Explanation], _Opts, Inform) ->
     Inform("Closing connection \"~s\"", [PidStr]),
     rpc_call(Node, rabbit_networking, close_connection,
@@ -506,9 +532,15 @@ action(set_policy, Node, [Key, Pattern, Defn], Opts, Inform) ->
     PriorityArg = proplists:get_value(?PRIORITY_OPT, Opts),
     ApplyToArg = list_to_binary(proplists:get_value(?APPLY_TO_OPT, Opts)),
     Inform(Msg, [Key, Pattern, Defn, PriorityArg]),
-    rpc_call(
+    Res = rpc_call(
       Node, rabbit_policy, parse_set,
-      [VHostArg, list_to_binary(Key), Pattern, Defn, PriorityArg, ApplyToArg]);
+      [VHostArg, list_to_binary(Key), Pattern, Defn, PriorityArg, ApplyToArg]),
+    case Res of
+        {error, Format, Args} when is_list(Format) andalso is_list(Args) ->
+            {error_string, rabbit_misc:format(Format, Args)};
+        _ ->
+            Res
+    end;
 
 action(clear_policy, Node, [Key], Opts, Inform) ->
     VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
@@ -600,10 +632,11 @@ action(list_user_permissions, Node, Args = [_Username], _Opts, Inform, Timeout)
          true);
 
 action(list_queues, Node, Args, Opts, Inform, Timeout) ->
+    [Online, Offline] = rabbit_cli:filter_opts(Opts, [?ONLINE_OPT, ?OFFLINE_OPT]),
     Inform("Listing queues", []),
     VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
     ArgAtoms = default_if_empty(Args, [name, messages]),
-    call(Node, {rabbit_amqqueue, info_all, [VHostArg, ArgAtoms]},
+    call(Node, {rabbit_amqqueue, info_all, [VHostArg, ArgAtoms, Online, Offline]},
          ArgAtoms, Timeout);
 
 action(list_exchanges, Node, Args, Opts, Inform, Timeout) ->
@@ -639,19 +672,25 @@ action(list_consumers, Node, _Args, Opts, Inform, Timeout) ->
     Inform("Listing consumers", []),
     VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
     call(Node, {rabbit_amqqueue, consumers_all, [VHostArg]},
-         rabbit_amqqueue:consumer_info_keys(), Timeout).
+         rabbit_amqqueue:consumer_info_keys(), Timeout);
+
+action(node_health_check, Node, _Args, _Opts, Inform, Timeout) ->
+    Inform("Checking health of node ~p", [Node]),
+    case rabbit_health_check:node(Node, Timeout) of
+        ok ->
+            io:format("Health check passed~n"),
+            ok;
+        Other ->
+            Other
+    end.
 
 format_parse_error({_Line, Mod, Err}) -> lists:flatten(Mod:format_error(Err)).
 
 sync_queue(Q) ->
-    rabbit_amqqueue:with(
-      Q, fun(#amqqueue{pid = QPid}) -> rabbit_amqqueue:sync_mirrors(QPid) end).
+    rabbit_mirror_queue_misc:sync_queue(Q).
 
 cancel_sync_queue(Q) ->
-    rabbit_amqqueue:with(
-      Q, fun(#amqqueue{pid = QPid}) ->
-                 rabbit_amqqueue:cancel_sync_mirrors(QPid)
-         end).
+    rabbit_mirror_queue_misc:cancel_sync_queue(Q).
 
 purge_queue(Q) ->
     rabbit_amqqueue:with(
@@ -740,15 +779,26 @@ default_if_empty(List, Default) when is_list(List) ->
        true       -> [list_to_atom(X) || X <- List]
     end.
 
+display_info_message_row(IsEscaped, Result, InfoItemKeys) ->
+    display_row([format_info_item(
+                   case proplists:lookup(X, Result) of
+                       none when is_list(Result), length(Result) > 0 ->
+                           exit({error, {bad_info_key, X}});
+                       none -> Result;
+                       {X, Value} -> Value
+                   end, IsEscaped) || X <- InfoItemKeys]).
+
 display_info_message(IsEscaped) ->
-    fun(Result, InfoItemKeys) ->
-            display_row([format_info_item(
-                           case proplists:lookup(X, Result) of
-                               none when is_list(Result), length(Result) > 0 ->
-                                   exit({error, {bad_info_key, X}});
-                               none -> Result;
-                               {X, Value} -> Value
-                           end, IsEscaped) || X <- InfoItemKeys])
+    fun ([], _) ->
+            ok;
+        ([FirstResult|_] = List, InfoItemKeys) when is_list(FirstResult) ->
+            lists:foreach(fun(Result) ->
+                                  display_info_message_row(IsEscaped, Result, InfoItemKeys)
+                          end,
+                          List),
+            ok;
+        (Result, InfoItemKeys) ->
+            display_info_message_row(IsEscaped, Result, InfoItemKeys)
     end.
 
 display_info_list(Results, InfoItemKeys) when is_list(Results) ->
@@ -903,6 +953,9 @@ nodes_in_cluster(Node) ->
     unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running]).
 
 alarms_by_node(Name) ->
-    Status = unsafe_rpc(Name, rabbit, status, []),
-    {_, As} = lists:keyfind(alarms, 1, Status),
-    {Name, As}.
+    case rpc_call(Name, rabbit, status, []) of
+        {badrpc,nodedown} -> {Name, [nodedown]};
+        Status ->
+            {_, As} = lists:keyfind(alarms, 1, Status),
+            {Name, As}
+    end.
index 252405d62b037c9f569dd1906b183077af9bf907..91d23c83a4d6c7ba32b97996b37f82b5ec6db4a1 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -type reason() :: 'expired' | 'rejected' | 'maxlen'.
 
 -spec publish(rabbit_types:message(), reason(), rabbit_types:exchange(),
               'undefined' | binary(), rabbit_amqqueue:name()) -> 'ok'.
 
--endif.
-
 %%----------------------------------------------------------------------------
 
 publish(Msg, Reason, X, RK, QName) ->
@@ -139,7 +135,19 @@ update_x_death_header(Info, Headers) ->
                     end,
             rabbit_misc:set_table_value(
               Headers, <<"x-death">>, array,
-              [{table, rabbit_misc:sort_field_table(Info1)} | Others])
+              [{table, rabbit_misc:sort_field_table(Info1)} | Others]);
+        {<<"x-death">>, InvalidType, Header} ->
+            rabbit_log:warning("Message has invalid x-death header (type: ~p)."
+                               " Resetting header ~p~n",
+                               [InvalidType, Header]),
+            %% if x-death is something other than an array (list)
+            %% then we reset it: this happens when some clients consume
+            %% a message and re-publish it, converting header values
+            %% to strings, intentionally or not.
+            %% See rabbitmq/rabbitmq-server#767 for details.
+            rabbit_misc:set_table_value(
+              Headers, <<"x-death">>, array,
+              [{table, [{<<"count">>, long, 1} | Info]}])
     end.
 
 ensure_xdeath_event_count({table, Info}, InitialVal) when InitialVal >= 1 ->
index 35d7eb7940f46e519f4baa8de2fe9cbb3e7b71ae..061105c150126d58996496049877d5636fec7681 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(boot/0 :: () -> 'ok').
--spec(force_event_refresh/1 :: (reference()) -> 'ok').
--spec(list/0 :: () -> [pid()]).
--spec(list_local/0 :: () -> [pid()]).
--spec(connect/5 :: (({'none', 'none'} | {rabbit_types:username(), 'none'} |
-                     {rabbit_types:username(), rabbit_types:password()}),
-                    rabbit_types:vhost(), rabbit_types:protocol(), pid(),
-                    rabbit_event:event_props()) ->
-                        rabbit_types:ok_or_error2(
-                          {rabbit_types:user(), rabbit_framing:amqp_table()},
-                          'broker_not_found_on_node' |
-                          {'auth_failure', string()} | 'access_refused')).
--spec(start_channel/9 ::
+-spec boot() -> 'ok'.
+-spec force_event_refresh(reference()) -> 'ok'.
+-spec list() -> [pid()].
+-spec list_local() -> [pid()].
+-spec connect
+        (({'none', 'none'} | {rabbit_types:username(), 'none'} |
+          {rabbit_types:username(), rabbit_types:password()}),
+         rabbit_types:vhost(), rabbit_types:protocol(), pid(),
+         rabbit_event:event_props()) ->
+            rabbit_types:ok_or_error2(
+              {rabbit_types:user(), rabbit_framing:amqp_table()},
+              'broker_not_found_on_node' |
+              {'auth_failure', string()} | 'access_refused').
+-spec start_channel
         (rabbit_channel:channel_number(), pid(), pid(), string(),
          rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
-         rabbit_framing:amqp_table(), pid()) -> {'ok', pid()}).
--spec(disconnect/2 :: (pid(), rabbit_event:event_props()) -> 'ok').
-
--endif.
+         rabbit_framing:amqp_table(), pid()) ->
+            {'ok', pid()}.
+-spec disconnect(pid(), rabbit_event:event_props()) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
@@ -76,8 +74,8 @@ connect({Username, none}, VHost, Protocol, Pid, Infos) ->
              VHost, Protocol, Pid, Infos);
 
 connect({Username, Password}, VHost, Protocol, Pid, Infos) ->
-    connect0(fun () -> rabbit_access_control:check_user_pass_login(
-                         Username, Password) end,
+    connect0(fun () -> rabbit_access_control:check_user_login(
+                         Username, [{password, Password}, {vhost, VHost}]) end,
              VHost, Protocol, Pid, Infos).
 
 connect0(AuthFun, VHost, Protocol, Pid, Infos) ->
index 124306487e8b28da96773062f695be4ac3c9c663..4c1ff0248621b9c911d1ae86d4c552a9e0913798 100644 (file)
@@ -1,4 +1,4 @@
-%% The contents of this file are subject to the Mozilla Public License
+%% The contents of this file are subject to the Mozilla Public License
 %% Version 1.1 (the "License"); you may not use this file except in
 %% compliance with the License. You may obtain a copy of the License
 %% at http://www.mozilla.org/MPL/
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--type(disk_free_limit() :: (integer() | string() | {'mem_relative', float()})).
--spec(start_link/1 :: (disk_free_limit()) -> rabbit_types:ok_pid_or_error()).
--spec(get_disk_free_limit/0 :: () -> integer()).
--spec(set_disk_free_limit/1 :: (disk_free_limit()) -> 'ok').
--spec(get_min_check_interval/0 :: () -> integer()).
--spec(set_min_check_interval/1 :: (integer()) -> 'ok').
--spec(get_max_check_interval/0 :: () -> integer()).
--spec(set_max_check_interval/1 :: (integer()) -> 'ok').
--spec(get_disk_free/0 :: () -> (integer() | 'unknown')).
-
--endif.
+-type disk_free_limit() :: (integer() | string() | {'mem_relative', float()}).
+-spec start_link(disk_free_limit()) -> rabbit_types:ok_pid_or_error().
+-spec get_disk_free_limit() -> integer().
+-spec set_disk_free_limit(disk_free_limit()) -> 'ok'.
+-spec get_min_check_interval() -> integer().
+-spec set_min_check_interval(integer()) -> 'ok'.
+-spec get_max_check_interval() -> integer().
+-spec set_max_check_interval(integer()) -> 'ok'.
+-spec get_disk_free() -> (integer() | 'unknown').
 
 %%----------------------------------------------------------------------------
 %% Public API
@@ -213,9 +209,11 @@ get_disk_free(Dir) ->
 
 get_disk_free(Dir, {unix, Sun})
   when Sun =:= sunos; Sun =:= sunos4; Sun =:= solaris ->
-    parse_free_unix(rabbit_misc:os_cmd("/usr/bin/df -k " ++ Dir));
+    Df = os:find_executable("df"),
+    parse_free_unix(rabbit_misc:os_cmd(Df ++ " -k " ++ Dir));
 get_disk_free(Dir, {unix, _}) ->
-    parse_free_unix(rabbit_misc:os_cmd("/bin/df -kP " ++ Dir));
+    Df = os:find_executable("df"),
+    parse_free_unix(rabbit_misc:os_cmd(Df ++ " -kP " ++ Dir));
 get_disk_free(Dir, {win32, _}) ->
     parse_free_win32(rabbit_misc:os_cmd("dir /-C /W \"" ++ Dir ++ "\"")).
 
@@ -235,7 +233,7 @@ parse_free_win32(CommandResult) ->
     list_to_integer(lists:reverse(Free)).
 
 interpret_limit({mem_relative, Relative}) 
-    when is_float(Relative), Relative < 1 ->
+    when is_float(Relative) ->
     round(Relative * vm_memory_monitor:get_total_memory());
 interpret_limit(Absolute) -> 
     case rabbit_resource_monitor_misc:parse_information_unit(Absolute) of
index d95ec49140c43244d5e1300598ba262be5631fa0..7f01a7183889755fa7e7c73375f47207967316fc 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
-
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
 
 %%----------------------------------------------------------------------------
 %% It's possible for epmd to be killed out from underneath us. If that
index d8472842430b92382d2563df30eecc56078177f5..5ba3ce7a4f3d408821b670f4cbd5d7db932949b1 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start/0 :: () -> 'ok').
--spec(stop/0  :: () -> 'ok').
-
--endif.
+-spec start() -> 'ok'.
+-spec stop() -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
@@ -54,7 +50,7 @@ start() ->
 
 stop() ->
     case error_logger:delete_report_handler(rabbit_error_logger) of
-        terminated_ok             -> ok;
+        ok                        -> ok;
         {error, module_not_found} -> ok
     end.
 
@@ -69,7 +65,7 @@ init([DefaultVHost]) ->
                    name = ?LOG_EXCH_NAME}}.
 
 terminate(_Arg, _State) ->
-    terminated_ok.
+    ok.
 
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
@@ -105,10 +101,11 @@ publish1(RoutingKey, Format, Data, LogExch) ->
 
     Args = [truncate:term(A, ?LOG_TRUNC) || A <- Data],
     Headers = [{<<"node">>, longstr, list_to_binary(atom_to_list(node()))}],
-    {ok, _DeliveredQPids} =
-        rabbit_basic:publish(LogExch, RoutingKey,
-                             #'P_basic'{content_type = <<"text/plain">>,
-                                        timestamp    = Timestamp,
-                                        headers      = Headers},
-                             list_to_binary(io_lib:format(Format, Args))),
-    ok.
+    case rabbit_basic:publish(LogExch, RoutingKey,
+                              #'P_basic'{content_type = <<"text/plain">>,
+                                         timestamp    = Timestamp,
+                                         headers      = Headers},
+                              list_to_binary(io_lib:format(Format, Args))) of
+        {ok, _QPids}  -> ok;
+        {error, _Err} -> ok
+    end.
index 2e9afbfd2e7d50314b8c7bd70348f1bbfbf6fef5..aaea27f91ac93eafc140df10527e7602d35a92f4 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([name/0, type/0]).
 
--type(name() :: rabbit_types:r('exchange')).
--type(type() :: atom()).
--type(fun_name() :: atom()).
+-type name() :: rabbit_types:r('exchange').
+-type type() :: atom().
+-type fun_name() :: atom().
 
--spec(recover/0 :: () -> [name()]).
--spec(callback/4::
+-spec recover() -> [name()].
+-spec callback
         (rabbit_types:exchange(), fun_name(),
-         fun((boolean()) -> non_neg_integer()) | atom(), [any()]) -> 'ok').
--spec(policy_changed/2 ::
-        (rabbit_types:exchange(), rabbit_types:exchange()) -> 'ok').
--spec(declare/6 ::
+         fun((boolean()) -> non_neg_integer()) | atom(), [any()]) -> 'ok'.
+-spec policy_changed
+        (rabbit_types:exchange(), rabbit_types:exchange()) -> 'ok'.
+-spec declare
         (name(), type(), boolean(), boolean(), boolean(),
          rabbit_framing:amqp_table())
-        -> rabbit_types:exchange()).
--spec(check_type/1 ::
-        (binary()) -> atom() | rabbit_types:connection_exit()).
--spec(assert_equivalence/6 ::
+        -> rabbit_types:exchange().
+-spec check_type
+        (binary()) -> atom() | rabbit_types:connection_exit().
+-spec assert_equivalence
         (rabbit_types:exchange(), atom(), boolean(), boolean(), boolean(),
          rabbit_framing:amqp_table())
-        -> 'ok' | rabbit_types:connection_exit()).
--spec(assert_args_equivalence/2 ::
+        -> 'ok' | rabbit_types:connection_exit().
+-spec assert_args_equivalence
         (rabbit_types:exchange(), rabbit_framing:amqp_table())
-        -> 'ok' | rabbit_types:connection_exit()).
--spec(lookup/1 ::
+        -> 'ok' | rabbit_types:connection_exit().
+-spec lookup
         (name()) -> rabbit_types:ok(rabbit_types:exchange()) |
-                    rabbit_types:error('not_found')).
--spec(lookup_or_die/1 ::
+                    rabbit_types:error('not_found').
+-spec lookup_or_die
         (name()) -> rabbit_types:exchange() |
-                    rabbit_types:channel_exit()).
--spec(list/0 :: () -> [rabbit_types:exchange()]).
--spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:exchange()]).
--spec(lookup_scratch/2 :: (name(), atom()) ->
+                    rabbit_types:channel_exit().
+-spec list() -> [rabbit_types:exchange()].
+-spec list(rabbit_types:vhost()) -> [rabbit_types:exchange()].
+-spec lookup_scratch(name(), atom()) ->
                                rabbit_types:ok(term()) |
-                               rabbit_types:error('not_found')).
--spec(update_scratch/3 :: (name(), atom(), fun((any()) -> any())) -> 'ok').
--spec(update/2 ::
+                               rabbit_types:error('not_found').
+-spec update_scratch(name(), atom(), fun((any()) -> any())) -> 'ok'.
+-spec update
         (name(),
          fun((rabbit_types:exchange()) -> rabbit_types:exchange()))
-         -> not_found | rabbit_types:exchange()).
--spec(update_decorators/1 :: (name()) -> 'ok').
--spec(immutable/1 :: (rabbit_types:exchange()) -> rabbit_types:exchange()).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (rabbit_types:exchange()) -> rabbit_types:infos()).
--spec(info/2 ::
+         -> not_found | rabbit_types:exchange().
+-spec update_decorators(name()) -> 'ok'.
+-spec immutable(rabbit_types:exchange()) -> rabbit_types:exchange().
+-spec info_keys() -> rabbit_types:info_keys().
+-spec info(rabbit_types:exchange()) -> rabbit_types:infos().
+-spec info
         (rabbit_types:exchange(), rabbit_types:info_keys())
-        -> rabbit_types:infos()).
--spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys())
-                   -> [rabbit_types:infos()]).
--spec(info_all/4 ::(rabbit_types:vhost(), rabbit_types:info_keys(),
+        -> rabbit_types:infos().
+-spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys())
+                   -> [rabbit_types:infos()].
+-spec info_all(rabbit_types:vhost(), rabbit_types:info_keys(),
                     reference(), pid())
-                   -> 'ok').
--spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery())
-                 -> [rabbit_amqqueue:name()]).
--spec(delete/2 ::
+                   -> 'ok'.
+-spec route(rabbit_types:exchange(), rabbit_types:delivery())
+                 -> [rabbit_amqqueue:name()].
+-spec delete
         (name(),  'true') -> 'ok' | rabbit_types:error('not_found' | 'in_use');
-        (name(), 'false') -> 'ok' | rabbit_types:error('not_found')).
--spec(validate_binding/2 ::
+        (name(), 'false') -> 'ok' | rabbit_types:error('not_found').
+-spec validate_binding
         (rabbit_types:exchange(), rabbit_types:binding())
-        -> rabbit_types:ok_or_error({'binding_invalid', string(), [any()]})).
--spec(maybe_auto_delete/2::
+        -> rabbit_types:ok_or_error({'binding_invalid', string(), [any()]}).
+-spec maybe_auto_delete
         (rabbit_types:exchange(), boolean())
-        -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}).
--spec(serial/1 :: (rabbit_types:exchange()) ->
-                       fun((boolean()) -> 'none' | pos_integer())).
--spec(peek_serial/1 :: (name()) -> pos_integer() | 'undefined').
-
--endif.
+        -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}.
+-spec serial(rabbit_types:exchange()) ->
+                       fun((boolean()) -> 'none' | pos_integer()).
+-spec peek_serial(name()) -> pos_integer() | 'undefined'.
 
 %%----------------------------------------------------------------------------
 
index fe344ba86e35fe09ec0c3ebb1122da6dfcf52aa4..196873aa22dba2a2f69f8f2f3863a86c9e55e90b 100644 (file)
                     {requires,    rabbit_registry},
                     {enables,     kernel_ready}]}).
 
--ifdef(use_specs).
--spec(headers_match/2 :: (rabbit_framing:amqp_table(),
-                          rabbit_framing:amqp_table()) -> boolean()).
--endif.
+-spec headers_match
+        (rabbit_framing:amqp_table(), rabbit_framing:amqp_table()) ->
+            boolean().
 
 description() ->
     [{description, <<"AMQP headers exchange, as per the AMQP specification">>}].
@@ -85,35 +84,51 @@ headers_match(Args, Data) ->
     MK = parse_x_match(rabbit_misc:table_lookup(Args, <<"x-match">>)),
     headers_match(Args, Data, true, false, MK).
 
-headers_match([], _Data, AllMatch, _AnyMatch, all) ->
-    AllMatch;
-headers_match([], _Data, _AllMatch, AnyMatch, any) ->
-    AnyMatch;
+% Shortcut clauses: stop scanning as soon as the outcome is decided
+% ('all' has already failed, or 'any' has already succeeded).
+headers_match(_, _, false, _, all) -> false;
+headers_match(_, _, _, true, any) -> true;
+
+% No more binding headers: return the accumulated result for this kind.
+headers_match([], _Data, AllMatch, _AnyMatch, all) -> AllMatch;
+headers_match([], _Data, _AllMatch, AnyMatch, any) -> AnyMatch;
+
+% Skip binding headers that start with "x-" (reserved, never matched).
 headers_match([{<<"x-", _/binary>>, _PT, _PV} | PRest], Data,
               AllMatch, AnyMatch, MatchKind) ->
     headers_match(PRest, Data, AllMatch, AnyMatch, MatchKind);
+
+% Data exhausted while binding headers remain: a required header is
+% missing, so 'all' fails; 'any' keeps whatever it has accumulated.
 headers_match(_Pattern, [], _AllMatch, AnyMatch, MatchKind) ->
     headers_match([], [], false, AnyMatch, MatchKind);
+
+% Data header has no counterpart in the binding: advance the data list.
 headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest],
               AllMatch, AnyMatch, MatchKind) when PK > DK ->
     headers_match(Pattern, DRest, AllMatch, AnyMatch, MatchKind);
+
+% Binding header has no counterpart in the data: 'all' fails; advance
+% the binding list.
 headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _],
               _AllMatch, AnyMatch, MatchKind) when PK < DK ->
     headers_match(PRest, Data, false, AnyMatch, MatchKind);
-headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest],
-              AllMatch, AnyMatch, MatchKind) when PK == DK ->
-    {AllMatch1, AnyMatch1} =
-        case rabbit_misc:type_class(PT) == rabbit_misc:type_class(DT) of
-            %% It's not properly specified, but a "no value" in a
-            %% pattern field is supposed to mean simple presence of
-            %% the corresponding data field. I've interpreted that to
-            %% mean a type of "void" for the pattern field.
-            _ when PT == void -> {AllMatch, true};
-            false             -> {false, AnyMatch};
-            _ when PV == DV   -> {AllMatch, true};
-            _                 -> {false, AnyMatch}
-        end,
-    headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind).
+
+%% It's not properly specified, but a "no value" in a
+%% pattern field is supposed to mean simple presence of
+%% the corresponding data field. I've interpreted that to
+%% mean a type of "void" for the pattern field.
+headers_match([{PK, void, _PV} | PRest], [{DK, _DT, _DV} | DRest],
+              AllMatch, _AnyMatch, MatchKind) when PK == DK ->
+    headers_match(PRest, DRest, AllMatch, true, MatchKind);
+
+% Same key and equal values (==): a match; record success for 'any'.
+% NOTE(review): unlike the deleted implementation this no longer checks
+% rabbit_misc:type_class/1 equality before comparing PV == DV — confirm
+% this relaxation is intended.
+headers_match([{PK, _PT, PV} | PRest], [{DK, _DT, DV} | DRest],
+              AllMatch, _AnyMatch, MatchKind) when PK == DK andalso PV == DV ->
+    headers_match(PRest, DRest, AllMatch, true, MatchKind);
+
+% Same key but values differ: 'all' fails; continue scanning.
+headers_match([{PK, _PT, _PV} | PRest], [{DK, _DT, _DV} | DRest],
+              _AllMatch, AnyMatch, MatchKind) when PK == DK ->
+    headers_match(PRest, DRest, false, AnyMatch, MatchKind).
+
 
 validate(_X) -> ok.
 create(_Tx, _X) -> ok.
index c8ca7ecae41c830f24d44fa61fd8680ed405d04d..2510c8a241c37701b22c58ca3fcc6ac6a24e10a7 100644 (file)
@@ -31,10 +31,8 @@ description() ->
 
 serialise_events() -> false.
 
--ifdef(use_specs).
--spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery())
-                 -> no_return()).
--endif.
+-spec route(rabbit_types:exchange(), rabbit_types:delivery()) -> no_return().
+
 route(#exchange{name = Name, type = Type}, _) ->
     rabbit_misc:protocol_error(
       precondition_failed,
index 6c4f0e5ccde07e8190908ad124d351a5c619f118..878b9da7a73b4be7c94f36801635276c84a3ac92 100644 (file)
@@ -23,6 +23,7 @@
 -export([append_file/2, ensure_parent_dirs_exist/1]).
 -export([rename/2, delete/1, recursive_delete/1, recursive_copy/2]).
 -export([lock_file/1]).
+-export([filename_as_a_directory/1]).
 
 -import(file_handle_cache, [with_handle/1, with_handle/2]).
 
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--type(ok_or_error() :: rabbit_types:ok_or_error(any())).
-
--spec(is_file/1 :: ((file:filename())) -> boolean()).
--spec(is_dir/1 :: ((file:filename())) -> boolean()).
--spec(file_size/1 :: ((file:filename())) -> non_neg_integer()).
--spec(ensure_dir/1 :: ((file:filename())) -> ok_or_error()).
--spec(wildcard/2 :: (string(), file:filename()) -> [file:filename()]).
--spec(list_dir/1 :: (file:filename()) -> rabbit_types:ok_or_error2(
-                                           [file:filename()], any())).
--spec(read_term_file/1 ::
-        (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any())).
--spec(write_term_file/2 :: (file:filename(), [any()]) -> ok_or_error()).
--spec(write_file/2 :: (file:filename(), iodata()) -> ok_or_error()).
--spec(write_file/3 :: (file:filename(), iodata(), [any()]) -> ok_or_error()).
--spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()).
--spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok').
--spec(rename/2 ::
-        (file:filename(), file:filename()) -> ok_or_error()).
--spec(delete/1 :: ([file:filename()]) -> ok_or_error()).
--spec(recursive_delete/1 ::
-        ([file:filename()])
-        -> rabbit_types:ok_or_error({file:filename(), any()})).
--spec(recursive_copy/2 ::
-        (file:filename(), file:filename())
-        -> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})).
--spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')).
-
--endif.
+-type ok_or_error() :: rabbit_types:ok_or_error(any()).
+
+-spec is_file((file:filename())) -> boolean().
+-spec is_dir((file:filename())) -> boolean().
+-spec file_size((file:filename())) -> non_neg_integer().
+-spec ensure_dir((file:filename())) -> ok_or_error().
+-spec wildcard(string(), file:filename()) -> [file:filename()].
+-spec list_dir(file:filename()) ->
+          rabbit_types:ok_or_error2([file:filename()], any()).
+-spec read_term_file
+        (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any()).
+-spec write_term_file(file:filename(), [any()]) -> ok_or_error().
+-spec write_file(file:filename(), iodata()) -> ok_or_error().
+-spec write_file(file:filename(), iodata(), [any()]) -> ok_or_error().
+-spec append_file(file:filename(), string()) -> ok_or_error().
+-spec ensure_parent_dirs_exist(string()) -> 'ok'.
+-spec rename(file:filename(), file:filename()) -> ok_or_error().
+-spec delete([file:filename()]) -> ok_or_error().
+-spec recursive_delete([file:filename()]) ->
+          rabbit_types:ok_or_error({file:filename(), any()}).
+-spec recursive_copy(file:filename(), file:filename()) ->
+          rabbit_types:ok_or_error({file:filename(), file:filename(), any()}).
+-spec lock_file(file:filename()) -> rabbit_types:ok_or_error('eexist').
+-spec filename_as_a_directory(file:filename()) -> file:filename().
 
 %%----------------------------------------------------------------------------
 
@@ -305,3 +300,11 @@ lock_file(Path) ->
                              ok = prim_file:close(Lock)
                    end)
     end.
+
+%% Ensure FileName carries a trailing slash so it can be used as a
+%% directory prefix (used by rabbit_hipe:compile_to_directory/1).
+filename_as_a_directory(FileName) ->
+    %% lists:last/1 on a string returns a character code, so the match
+    %% must be against the character $/ — the original pattern "/"
+    %% (a one-element list) could never equal a character, making the
+    %% first clause dead and producing "dir//" for inputs that already
+    %% end in a slash.
+    case lists:last(FileName) of
+        $/ ->
+            FileName;
+        _ ->
+            FileName ++ "/"
+    end.
index e5b54dc4e003cdc14c7250b1d0b0b27043ca2487..e4a5013003f247ad772c4b06e0067b71aedae12c 100644 (file)
@@ -18,8 +18,6 @@
 
 -module(rabbit_framing).
 
--ifdef(use_specs).
-
 -export_type([protocol/0,
               amqp_field_type/0, amqp_property_type/0,
               amqp_table/0, amqp_array/0, amqp_value/0,
@@ -27,7 +25,7 @@
               amqp_method_field_name/0, amqp_property_record/0,
               amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
 
--type(protocol() :: 'rabbit_framing_amqp_0_8' | 'rabbit_framing_amqp_0_9_1').
+-type protocol() :: 'rabbit_framing_amqp_0_8' | 'rabbit_framing_amqp_0_9_1'.
 
 -define(protocol_type(T), type(T :: rabbit_framing_amqp_0_8:T |
                                     rabbit_framing_amqp_0_9_1:T)).
@@ -45,5 +43,3 @@
 -?protocol_type(amqp_exception()).
 -?protocol_type(amqp_exception_code()).
 -?protocol_type(amqp_class_id()).
-
--endif.
index 4d2b450409bef75eae9bc70acfd2819f1c322753..75f9df7b3faefe24a894c699d36fe73640680e9c 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([guid/0]).
 
--type(guid() :: binary()).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(filename/0 :: () -> string()).
--spec(gen/0 :: () -> guid()).
--spec(gen_secure/0 :: () -> guid()).
--spec(string/2 :: (guid(), any()) -> string()).
--spec(binary/2 :: (guid(), any()) -> binary()).
+-type guid() :: binary().
 
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec filename() -> string().
+-spec gen() -> guid().
+-spec gen_secure() -> guid().
+-spec string(guid(), any()) -> string().
+-spec binary(guid(), any()) -> binary().
 
 %%----------------------------------------------------------------------------
 
index 0302d82839e61887934d06ad1e34f9f5e505bdda..d4597d4efcfaf428125b3313e25356d90e0849c6 100644 (file)
@@ -5,15 +5,15 @@
 %% practice 2 processes seems just as fast as any other number > 1,
 %% and keeps the progress bar realistic-ish.
 -define(HIPE_PROCESSES, 2).
--export([maybe_hipe_compile/0, log_hipe_result/1]).
 
-%% HiPE compilation happens before we have log handlers - so we have
-%% to io:format/2, it's all we can do.
+-export([maybe_hipe_compile/0, log_hipe_result/1]).
+-export([compile_to_directory/1]).
+-export([can_hipe_compile/0]).
 
+%% Compile and load during server startup sequence
 maybe_hipe_compile() ->
     {ok, Want} = application:get_env(rabbit, hipe_compile),
-    Can = code:which(hipe) =/= non_existing,
-    case {Want, Can} of
+    case {Want, can_hipe_compile()} of
         {true,  true}  -> hipe_compile();
         {true,  false} -> false;
         {false, _}     -> {ok, disabled}
@@ -33,33 +33,49 @@ log_hipe_result(false) ->
     rabbit_log:warning(
       "Not HiPE compiling: HiPE not found in this Erlang installation.~n").
 
+hipe_compile() ->
+    hipe_compile(fun compile_and_load/1, false).
+
+compile_to_directory(Dir0) ->
+    Dir = rabbit_file:filename_as_a_directory(Dir0),
+    ok = prepare_ebin_directory(Dir),
+    hipe_compile(fun (Mod) -> compile_and_save(Mod, Dir) end, true).
+
+%% Decide whether Mod should be HiPE-compiled. With Force = true (used
+%% by compile_to_directory/1) every loadable module is compiled
+%% regardless of its current native state.
+needs_compilation(Mod, Force) ->
+    Exists = code:which(Mod) =/= non_existing,
+    %% We skip modules already natively compiled. This
+    %% happens when RabbitMQ is stopped (just the
+    %% application, not the entire node) and started
+    %% again.
+    NotYetCompiled = not already_hipe_compiled(Mod),
+    %% Modules carrying an 'erlang_version_support' attribute are also
+    %% skipped — presumably those are handled per Erlang release;
+    %% TODO(review) confirm against the build tooling.
+    NotVersioned = not compiled_with_version_support(Mod),
+    Exists andalso (Force orelse (NotYetCompiled andalso NotVersioned)).
+
 %% HiPE compilation happens before we have log handlers and can take a
 %% long time, so make an exception to our no-stdout policy and display
 %% progress via stdout.
-hipe_compile() ->
+hipe_compile(CompileFun, Force) ->
     {ok, HipeModulesAll} = application:get_env(rabbit, hipe_modules),
-    HipeModules = [HM || HM <- HipeModulesAll,
-                   code:which(HM) =/= non_existing andalso
-                   %% We skip modules already natively compiled. This
-                   %% happens when RabbitMQ is stopped (just the
-                   %% application, not the entire node) and started
-                   %% again.
-                   already_hipe_compiled(HM)],
+    HipeModules = lists:filter(fun(Mod) -> needs_compilation(Mod, Force) end, HipeModulesAll),
     case HipeModules of
         [] -> {ok, already_compiled};
-        _  -> do_hipe_compile(HipeModules)
+        _  -> do_hipe_compile(HipeModules, CompileFun)
     end.
 
 already_hipe_compiled(Mod) ->
     try
     %% OTP 18.x or later
-       Mod:module_info(native) =:= false
+        Mod:module_info(native) =:= true
     %% OTP prior to 18.x
     catch error:badarg ->
-       code:is_module_native(Mod) =:= false
+        code:is_module_native(Mod) =:= true
     end.
 
-do_hipe_compile(HipeModules) ->
+compiled_with_version_support(Mod) ->
+    proplists:get_value(erlang_version_support, Mod:module_info(attributes))
+        =/= undefined.
+
+do_hipe_compile(HipeModules, CompileFun) ->
     Count = length(HipeModules),
     io:format("~nHiPE compiling:  |~s|~n                 |",
               [string:copies("-", Count)]),
@@ -74,11 +90,7 @@ do_hipe_compile(HipeModules) ->
     %% advanced API does not load automatically the code, except if the
     %% 'load' option is set.
     PidMRefs = [spawn_monitor(fun () -> [begin
-                                             {M, Beam, _} =
-                                               code:get_object_code(M),
-                                             {ok, _} =
-                                               hipe:compile(M, [], Beam,
-                                                            [o3, load]),
+                                             CompileFun(M),
                                              io:format("#")
                                          end || M <- Ms]
                               end) ||
@@ -96,3 +108,39 @@ split(L, N) -> split0(L, [[] || _ <- lists:seq(1, N)]).
 
 split0([],       Ls)       -> Ls;
 split0([I | Is], [L | Ls]) -> split0(Is, Ls ++ [[I | L]]).
+
+prepare_ebin_directory(Dir) ->
+    ok = rabbit_file:ensure_dir(Dir),
+    ok = delete_beam_files(Dir),
+    ok.
+
+%% Remove every *.beam file directly inside Dir (non-recursive) so a
+%% previous compile_to_directory/1 run leaves no stale output behind.
+delete_beam_files(Dir) ->
+    {ok, Files} = file:list_dir(Dir),
+    lists:foreach(fun(File) ->
+                          case filename:extension(File) of
+                              ".beam" ->
+                                  ok = file:delete(filename:join([Dir, File]));
+                              _ ->
+                                  ok
+                          end
+                  end,
+                  Files).
+
+compile_and_load(Mod) ->
+    {Mod, Beam, _} = code:get_object_code(Mod),
+    {ok, _} = hipe:compile(Mod, [], Beam, [o3, load]).
+
+%% HiPE-compile Module and write a .beam file into Dir whose native
+%% code chunk for the current architecture is replaced with the freshly
+%% compiled code. Returns the path of the file written.
+compile_and_save(Module, Dir) ->
+    {Module, BeamCode, _} = code:get_object_code(Module),
+    BeamName = filename:join([Dir, atom_to_list(Module) ++ ".beam"]),
+    %% No 'load' option here (unlike compile_and_load/1): the result is
+    %% saved to disk, not loaded into the running VM.
+    {ok, {Architecture, NativeCode}} = hipe:compile(Module, [], BeamCode, [o3]),
+    {ok, _, Chunks0} = beam_lib:all_chunks(BeamCode),
+    %% Drop any pre-existing native chunk for this architecture before
+    %% appending the new one, so the module carries exactly one.
+    ChunkName = hipe_unified_loader:chunk_name(Architecture),
+    Chunks1 = lists:keydelete(ChunkName, 1, Chunks0),
+    Chunks = Chunks1 ++ [{ChunkName,NativeCode}],
+    {ok, BeamPlusNative} = beam_lib:build_module(Chunks),
+    ok = file:write_file(BeamName, BeamPlusNative),
+    BeamName.
+
+can_hipe_compile() ->
+    code:which(hipe) =/= non_existing.
index 0a2b8c5fc6754a3be127ea3e29e6e4666b8fe1e8..203e309b029504eb902822759b45b45ce5b91003 100644 (file)
 -record(lstate, {pid, prefetch_limited}).
 -record(qstate, {pid, state, credits}).
 
--ifdef(use_specs).
-
--type(lstate() :: #lstate{pid              :: pid(),
-                          prefetch_limited :: boolean()}).
--type(qstate() :: #qstate{pid :: pid(),
-                          state :: 'dormant' | 'active' | 'suspended'}).
-
--type(credit_mode() :: 'manual' | 'drain' | 'auto').
-
--spec(start_link/1 :: (rabbit_types:proc_name()) ->
-                           rabbit_types:ok_pid_or_error()).
--spec(new/1 :: (pid()) -> lstate()).
-
--spec(limit_prefetch/3      :: (lstate(), non_neg_integer(), non_neg_integer())
-                               -> lstate()).
--spec(unlimit_prefetch/1    :: (lstate()) -> lstate()).
--spec(is_active/1           :: (lstate()) -> boolean()).
--spec(get_prefetch_limit/1  :: (lstate()) -> non_neg_integer()).
--spec(ack/2                 :: (lstate(), non_neg_integer()) -> 'ok').
--spec(pid/1                 :: (lstate()) -> pid()).
-
--spec(client/1       :: (pid()) -> qstate()).
--spec(activate/1     :: (qstate()) -> qstate()).
--spec(can_send/3     :: (qstate(), boolean(), rabbit_types:ctag()) ->
-                             {'continue' | 'suspend', qstate()}).
--spec(resume/1       :: (qstate()) -> qstate()).
--spec(deactivate/1   :: (qstate()) -> qstate()).
--spec(is_suspended/1 :: (qstate()) -> boolean()).
--spec(is_consumer_blocked/2 :: (qstate(), rabbit_types:ctag()) -> boolean()).
--spec(credit/5 :: (qstate(), rabbit_types:ctag(), non_neg_integer(),
-                   credit_mode(), boolean()) -> {boolean(), qstate()}).
--spec(ack_from_queue/3 :: (qstate(), rabbit_types:ctag(), non_neg_integer())
-                          -> {boolean(), qstate()}).
--spec(drained/1 :: (qstate())
-                   -> {[{rabbit_types:ctag(), non_neg_integer()}], qstate()}).
--spec(forget_consumer/2 :: (qstate(), rabbit_types:ctag()) -> qstate()).
-
--endif.
+-type lstate() :: #lstate{pid              :: pid(),
+                          prefetch_limited :: boolean()}.
+-type qstate() :: #qstate{pid :: pid(),
+                          state :: 'dormant' | 'active' | 'suspended'}.
+
+-type credit_mode() :: 'manual' | 'drain' | 'auto'.
+
+-spec start_link(rabbit_types:proc_name()) ->
+                           rabbit_types:ok_pid_or_error().
+-spec new(pid()) -> lstate().
+
+-spec limit_prefetch(lstate(), non_neg_integer(), non_neg_integer()) ->
+          lstate().
+-spec unlimit_prefetch(lstate()) -> lstate().
+-spec is_active(lstate()) -> boolean().
+-spec get_prefetch_limit(lstate()) -> non_neg_integer().
+-spec ack(lstate(), non_neg_integer()) -> 'ok'.
+-spec pid(lstate()) -> pid().
+
+-spec client(pid()) -> qstate().
+-spec activate(qstate()) -> qstate().
+-spec can_send(qstate(), boolean(), rabbit_types:ctag()) ->
+          {'continue' | 'suspend', qstate()}.
+-spec resume(qstate()) -> qstate().
+-spec deactivate(qstate()) -> qstate().
+-spec is_suspended(qstate()) -> boolean().
+-spec is_consumer_blocked(qstate(), rabbit_types:ctag()) -> boolean().
+-spec credit
+        (qstate(), rabbit_types:ctag(), non_neg_integer(), credit_mode(),
+         boolean()) ->
+            {boolean(), qstate()}.
+-spec ack_from_queue(qstate(), rabbit_types:ctag(), non_neg_integer()) ->
+          {boolean(), qstate()}.
+-spec drained(qstate()) ->
+          {[{rabbit_types:ctag(), non_neg_integer()}], qstate()}.
+-spec forget_consumer(qstate(), rabbit_types:ctag()) -> qstate().
 
 %%----------------------------------------------------------------------------
 
@@ -434,7 +432,7 @@ notify_queues(State = #lim{ch_pid = ChPid, queues = Queues}) ->
             %% We randomly vary the position of queues in the list,
             %% thus ensuring that each queue has an equal chance of
             %% being notified first.
-            {L1, L2} = lists:split(random:uniform(L), QList),
+            {L1, L2} = lists:split(rand_compat:uniform(L), QList),
             [[ok = rabbit_amqqueue:resume(Q, ChPid) || Q <- L3]
              || L3 <- [L2, L1]],
             ok
index c6081fad0d0bfacaa994bcfa4e556d8c38f0a707..337fb23f840b5ee617bc37e71837c37f9295ebdd 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([level/0]).
 
--type(category() :: atom()).
--type(level() :: 'debug' | 'info' | 'warning' | 'error').
-
--spec(log/3 :: (category(), level(), string()) -> 'ok').
--spec(log/4 :: (category(), level(), string(), [any()]) -> 'ok').
+-type category() :: atom().
+-type level() :: 'debug' | 'info' | 'warning' | 'error'.
 
--spec(debug/1   :: (string()) -> 'ok').
--spec(debug/2   :: (string(), [any()]) -> 'ok').
--spec(info/1    :: (string()) -> 'ok').
--spec(info/2    :: (string(), [any()]) -> 'ok').
--spec(warning/1 :: (string()) -> 'ok').
--spec(warning/2 :: (string(), [any()]) -> 'ok').
--spec(error/1   :: (string()) -> 'ok').
--spec(error/2   :: (string(), [any()]) -> 'ok').
+-spec log(category(), level(), string()) -> 'ok'.
+-spec log(category(), level(), string(), [any()]) -> 'ok'.
 
--spec(with_local_io/1 :: (fun (() -> A)) -> A).
+-spec debug(string()) -> 'ok'.
+-spec debug(string(), [any()]) -> 'ok'.
+-spec info(string()) -> 'ok'.
+-spec info(string(), [any()]) -> 'ok'.
+-spec warning(string()) -> 'ok'.
+-spec warning(string(), [any()]) -> 'ok'.
+-spec error(string()) -> 'ok'.
+-spec error(string(), [any()]) -> 'ok'.
 
--endif.
+-spec with_local_io(fun (() -> A)) -> A.
 
 %%----------------------------------------------------------------------------
 
@@ -96,10 +92,20 @@ with_local_io(Fun) ->
     Node = node(),
     case node(GL) of
         Node -> Fun();
-        _    -> group_leader(whereis(user), self()),
+        _    -> set_group_leader_to_user_safely(whereis(user)),
                 try
                     Fun()
                 after
                     group_leader(GL, self())
                 end
     end.
+
+%% whereis(user) returned 'undefined': the standard I/O system is gone,
+%% so redirecting the group leader is impossible — bail out loudly.
+set_group_leader_to_user_safely(undefined) ->
+    handle_damaged_io_system();
+set_group_leader_to_user_safely(User) when is_pid(User) ->
+    group_leader(User, self()).
+
+%% Last resort: report on standard_error (still writable without the
+%% 'user' process) and terminate the calling process.
+handle_damaged_io_system() ->
+    Msg = "Erlang VM I/O system is damaged, restart needed~n",
+    io:format(standard_error, Msg, []),
+    exit(erlang_vm_restart_needed).
index 5e515bfb03fbcddda92f0d8b498407c643d3d705..6fd12b30ff74fe20df7c5bdd80af83daf46f3800 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(register/2 :: (pid(), {atom(),atom(),[any()]}) -> 'ok').
--spec(deregister/1 :: (pid()) -> 'ok').
--spec(report_ram_duration/2 ::
-        (pid(), float() | 'infinity') -> number() | 'infinity').
--spec(stop/0 :: () -> 'ok').
-
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec register(pid(), {atom(),atom(),[any()]}) -> 'ok'.
+-spec deregister(pid()) -> 'ok'.
+-spec report_ram_duration
+        (pid(), float() | 'infinity') -> number() | 'infinity'.
+-spec stop() -> 'ok'.
 
 %%----------------------------------------------------------------------------
 %% Public API
index 1679767286e756e99c503774f14d4817e65f1b7a..221f11f18a01134d9bcb56cb0b53f1c259fe50a3 100644 (file)
                  depth_fun
                }).
 
--ifdef(use_specs).
-
--spec(start_link/4 :: (rabbit_types:amqqueue(), pid() | 'undefined',
-                       rabbit_mirror_queue_master:death_fun(),
-                       rabbit_mirror_queue_master:depth_fun()) ->
-                           rabbit_types:ok_pid_or_error()).
--spec(get_gm/1 :: (pid()) -> pid()).
--spec(ensure_monitoring/2 :: (pid(), [pid()]) -> 'ok').
-
--endif.
+-spec start_link
+        (rabbit_types:amqqueue(), pid() | 'undefined',
+         rabbit_mirror_queue_master:death_fun(),
+         rabbit_mirror_queue_master:depth_fun()) ->
+            rabbit_types:ok_pid_or_error().
+-spec get_gm(pid()) -> pid().
+-spec ensure_monitoring(pid(), [pid()]) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 %%
index 057a4fad3116ca74b821327d67a8bb0ca0d08718..d78f6180e7ab22f0815c15643e453909306ea01b 100644 (file)
                  backing_queue_state,
                  seen_status,
                  confirmed,
-                 known_senders
+                 known_senders,
+                 wait_timeout
                }).
 
--ifdef(use_specs).
-
 -export_type([death_fun/0, depth_fun/0, stats_fun/0]).
 
--type(death_fun() :: fun ((pid()) -> 'ok')).
--type(depth_fun() :: fun (() -> 'ok')).
--type(stats_fun() :: fun ((any()) -> 'ok')).
--type(master_state() :: #state { name                :: rabbit_amqqueue:name(),
+-type death_fun() :: fun ((pid()) -> 'ok').
+-type depth_fun() :: fun (() -> 'ok').
+-type stats_fun() :: fun ((any()) -> 'ok').
+-type master_state() :: #state { name                :: rabbit_amqqueue:name(),
                                  gm                  :: pid(),
                                  coordinator         :: pid(),
                                  backing_queue       :: atom(),
                                  backing_queue_state :: any(),
-                                 seen_status         :: dict:dict(),
+                                 seen_status         :: ?DICT_TYPE(),
                                  confirmed           :: [rabbit_guid:guid()],
-                                 known_senders       :: sets:set()
-                               }).
-
--spec(promote_backing_queue_state/8 ::
+                                 known_senders       :: ?SET_TYPE()
+                               }.
+-spec promote_backing_queue_state
         (rabbit_amqqueue:name(), pid(), atom(), any(), pid(), [any()],
-         dict:dict(), [pid()]) -> master_state()).
--spec(sender_death_fun/0 :: () -> death_fun()).
--spec(depth_fun/0 :: () -> depth_fun()).
--spec(init_with_existing_bq/3 :: (rabbit_types:amqqueue(), atom(), any()) ->
-                                      master_state()).
--spec(stop_mirroring/1 :: (master_state()) -> {atom(), any()}).
--spec(sync_mirrors/3 :: (stats_fun(), stats_fun(), master_state()) ->
-    {'ok', master_state()} | {stop, any(), master_state()}).
+         ?DICT_TYPE(), [pid()]) ->
+            master_state().
 
--endif.
+-spec sender_death_fun() -> death_fun().
+-spec depth_fun() -> depth_fun().
+-spec init_with_existing_bq(rabbit_types:amqqueue(), atom(), any()) ->
+          master_state().
+-spec stop_mirroring(master_state()) -> {atom(), any()}.
+-spec sync_mirrors(stats_fun(), stats_fun(), master_state()) ->
+          {'ok', master_state()} | {stop, any(), master_state()}.
 
 %% For general documentation of HA design, see
 %% rabbit_mirror_queue_coordinator
@@ -130,7 +128,8 @@ init_with_existing_bq(Q = #amqqueue{name = QName}, BQ, BQS) ->
              backing_queue_state = BQS,
              seen_status         = dict:new(),
              confirmed           = [],
-             known_senders       = sets:new() }.
+             known_senders       = sets:new(),
+             wait_timeout        = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000) }.
 
 stop_mirroring(State = #state { coordinator         = CPid,
                                 backing_queue       = BQ,
@@ -203,7 +202,7 @@ delete_and_terminate(Reason, State = #state { backing_queue       = BQ,
     stop_all_slaves(Reason, State),
     State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)}.
 
-stop_all_slaves(Reason, #state{name = QName, gm = GM}) ->
+stop_all_slaves(Reason, #state{name = QName, gm = GM, wait_timeout = WT}) ->
     {ok, #amqqueue{slave_pids = SPids}} = rabbit_amqqueue:lookup(QName),
     PidsMRefs = [{Pid, erlang:monitor(process, Pid)} || Pid <- [GM | SPids]],
     ok = gm:broadcast(GM, {delete_and_terminate, Reason}),
@@ -215,7 +214,7 @@ stop_all_slaves(Reason, #state{name = QName, gm = GM}) ->
     [receive
          {'DOWN', MRef, process, _Pid, _Info} ->
              ok
-     after 15000 ->
+     after WT ->
              rabbit_mirror_queue_misc:log_warning(
                QName, "Missing 'DOWN' message from ~p in node ~p~n",
                [Pid, node(Pid)]),
@@ -361,7 +360,7 @@ fetch(AckRequired, State = #state { backing_queue       = BQ,
     State1 = State #state { backing_queue_state = BQS1 },
     {Result, case Result of
                  empty                          -> State1;
-                 {_MsgId, _IsDelivered, AckTag} -> drop_one(AckTag, State1)
+                 {_MsgId, _IsDelivered, _AckTag} -> drop_one(AckRequired, State1)
              end}.
 
 drop(AckRequired, State = #state { backing_queue       = BQ,
@@ -370,7 +369,7 @@ drop(AckRequired, State = #state { backing_queue       = BQ,
     State1 = State #state { backing_queue_state = BQS1 },
     {Result, case Result of
                  empty            -> State1;
-                 {_MsgId, AckTag} -> drop_one(AckTag, State1)
+                 {_MsgId, _AckTag} -> drop_one(AckRequired, State1)
              end}.
 
 ack(AckTags, State = #state { gm                  = GM,
@@ -516,6 +515,7 @@ promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) ->
     Depth = BQ:depth(BQS1),
     true = Len == Depth, %% ASSERTION: everything must have been requeued
     ok = gm:broadcast(GM, {depth, Depth}),
+    WaitTimeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000),
     #state { name                = QName,
              gm                  = GM,
              coordinator         = CPid,
@@ -523,7 +523,8 @@ promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) ->
              backing_queue_state = BQS1,
              seen_status         = Seen,
              confirmed           = [],
-             known_senders       = sets:from_list(KS) }.
+             known_senders       = sets:from_list(KS),
+             wait_timeout        = WaitTimeout }.
 
 sender_death_fun() ->
     Self = self(),
@@ -554,10 +555,10 @@ depth_fun() ->
 %% Helpers
 %% ---------------------------------------------------------------------------
 
-drop_one(AckTag, State = #state { gm                  = GM,
-                                  backing_queue       = BQ,
-                                  backing_queue_state = BQS }) ->
-    ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckTag =/= undefined}),
+drop_one(AckRequired, State = #state { gm                  = GM,
+                                       backing_queue       = BQ,
+                                       backing_queue_state = BQS }) ->
+    ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckRequired}),
     State.
 
 drop(PrevLen, AckRequired, State = #state { gm                  = GM,
index 849efa36117422ab06219e9825f36aeed3963aaa..83350920e6c9a0c053e6e19b7d7b3eb83f98763f 100644 (file)
@@ -24,6 +24,8 @@
          maybe_auto_sync/1, maybe_drop_master_after_sync/1,
          sync_batch_size/1, log_info/3, log_warning/3]).
 
+-export([sync_queue/1, cancel_sync_queue/1]).
+
 %% for testing only
 -export([module/1]).
 
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(remove_from_queue/3 ::
-        (rabbit_amqqueue:name(), pid(), [pid()])
-        -> {'ok', pid(), [pid()], [node()]} | {'error', 'not_found'}).
--spec(on_node_up/0 :: () -> 'ok').
--spec(add_mirrors/3 :: (rabbit_amqqueue:name(), [node()], 'sync' | 'async')
-                       -> 'ok').
--spec(store_updated_slaves/1 :: (rabbit_types:amqqueue()) ->
-                                     rabbit_types:amqqueue()).
--spec(initial_queue_node/2 :: (rabbit_types:amqqueue(), node()) -> node()).
--spec(suggested_queue_nodes/1 :: (rabbit_types:amqqueue()) ->
-                                      {node(), [node()]}).
--spec(is_mirrored/1 :: (rabbit_types:amqqueue()) -> boolean()).
--spec(update_mirrors/2 ::
-        (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok').
--spec(maybe_drop_master_after_sync/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(maybe_auto_sync/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(log_info/3 :: (rabbit_amqqueue:name(), string(), [any()]) -> 'ok').
--spec(log_warning/3 :: (rabbit_amqqueue:name(), string(), [any()]) -> 'ok').
-
--endif.
+-spec remove_from_queue
+        (rabbit_amqqueue:name(), pid(), [pid()]) ->
+            {'ok', pid(), [pid()], [node()]} | {'error', 'not_found'}.
+-spec on_node_up() -> 'ok'.
+-spec add_mirrors(rabbit_amqqueue:name(), [node()], 'sync' | 'async') ->
+          'ok'.
+-spec store_updated_slaves(rabbit_types:amqqueue()) ->
+          rabbit_types:amqqueue().
+-spec initial_queue_node(rabbit_types:amqqueue(), node()) -> node().
+-spec suggested_queue_nodes(rabbit_types:amqqueue()) ->
+          {node(), [node()]}.
+-spec is_mirrored(rabbit_types:amqqueue()) -> boolean().
+-spec update_mirrors
+        (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok'.
+-spec maybe_drop_master_after_sync(rabbit_types:amqqueue()) -> 'ok'.
+-spec maybe_auto_sync(rabbit_types:amqqueue()) -> 'ok'.
+-spec log_info(rabbit_amqqueue:name(), string(), [any()]) -> 'ok'.
+-spec log_warning(rabbit_amqqueue:name(), string(), [any()]) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
@@ -364,6 +362,16 @@ maybe_auto_sync(Q = #amqqueue{pid = QPid}) ->
             ok
     end.
 
+sync_queue(Q) ->
+    rabbit_amqqueue:with(
+      Q, fun(#amqqueue{pid = QPid}) -> rabbit_amqqueue:sync_mirrors(QPid) end).
+
+cancel_sync_queue(Q) ->
+    rabbit_amqqueue:with(
+      Q, fun(#amqqueue{pid = QPid}) ->
+                 rabbit_amqqueue:cancel_sync_mirrors(QPid)
+         end).
+
 sync_batch_size(#amqqueue{} = Q) ->
     case policy(<<"ha-sync-batch-size">>, Q) of
         none -> %% we need this case because none > 1 == true
index 5bb243746a09fe52c585e912e23d0c757fc5b887..3733c7f0f8df92c1770ba707b01d1f917e2f10f9 100644 (file)
 
 -module(rabbit_mirror_queue_mode).
 
--ifdef(use_specs).
-
--type(master() :: node()).
--type(slave() :: node()).
--type(params() :: any()).
+-type master() :: node().
+-type slave() :: node().
+-type params() :: any().
 
 -callback description() -> [proplists:property()].
 
 %% Are the parameters valid for this mode?
 -callback validate_policy(params()) ->
     rabbit_policy_validator:validate_results().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
-    [{description, 0}, {suggested_queue_nodes, 5}, {validate_policy, 1}];
-behaviour_info(_Other) ->
-    undefined.
-
--endif.
index 4721ad613630c08e5d57b414995e22a82eddffed..593f0a4138bb6bbee274797a55faa5bbd68dd44a 100644 (file)
@@ -45,10 +45,7 @@ suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) ->
             end}.
 
 shuffle(L) ->
-    random:seed(erlang:phash2([node()]),
-                time_compat:monotonic_time(),
-                time_compat:unique_integer()),
-    {_, L1} = lists:unzip(lists:keysort(1, [{random:uniform(), N} || N <- L])),
+    {_, L1} = lists:unzip(lists:keysort(1, [{rand_compat:uniform(), N} || N <- L])),
     L1.
 
 validate_policy(N) when is_integer(N) andalso N > 0 ->
index 9edb99c4d707225095f07077db3a423b161bf04c..6f46cdc69881707b65e124470f71f254856634cc 100644 (file)
@@ -120,7 +120,7 @@ handle_go(Q = #amqqueue{name = QName}) ->
                    Self, {rabbit_amqqueue, set_ram_duration_target, [Self]}),
             {ok, BQ} = application:get_env(backing_queue_module),
             Q1 = Q #amqqueue { pid = QPid },
-            ok = rabbit_queue_index:erase(QName), %% For crash recovery
+            _ = BQ:delete_crashed(Q), %% For crash recovery
             BQS = bq_init(BQ, Q1, new),
             State = #state { q                   = Q1,
                              gm                  = GM,
@@ -542,9 +542,8 @@ confirm_messages(MsgIds, State = #state { msg_id_status = MS }) ->
 handle_process_result({ok,   State}) -> noreply(State);
 handle_process_result({stop, State}) -> {stop, normal, State}.
 
--ifdef(use_specs).
--spec(promote_me/2 :: ({pid(), term()}, #state{}) -> no_return()).
--endif.
+-spec promote_me({pid(), term()}, #state{}) -> no_return().
+
 promote_me(From, #state { q                   = Q = #amqqueue { name = QName },
                           gm                  = GM,
                           backing_queue       = BQ,
index a97a9b50c86ac572dd859d0b4496259bf177681a..54f0855fce44ddfef7711e1e5675572e6ba324c4 100644 (file)
 %%                 ||                 || -- sync_complete --> ||
 %%                 ||               (Dies)                    ||
 
--ifdef(use_specs).
-
--type(log_fun() :: fun ((string(), [any()]) -> 'ok')).
--type(bq() :: atom()).
--type(bqs() :: any()).
--type(ack() :: any()).
--type(slave_sync_state() :: {[{rabbit_types:msg_id(), ack()}], timer:tref(),
-                             bqs()}).
-
--spec(master_prepare/4 :: (reference(), rabbit_amqqueue:name(),
-                               log_fun(), [pid()]) -> pid()).
--spec(master_go/8 :: (pid(), reference(), log_fun(),
+-type log_fun() :: fun ((string(), [any()]) -> 'ok').
+-type bq() :: atom().
+-type bqs() :: any().
+-type ack() :: any().
+-type slave_sync_state() :: {[{rabbit_types:msg_id(), ack()}], timer:tref(),
+                             bqs()}.
+
+-spec master_prepare(reference(), rabbit_amqqueue:name(),
+                               log_fun(), [pid()]) -> pid().
+-spec master_go(pid(), reference(), log_fun(),
                       rabbit_mirror_queue_master:stats_fun(),
                       rabbit_mirror_queue_master:stats_fun(),
                       non_neg_integer(),
                       bq(), bqs()) ->
                           {'already_synced', bqs()} | {'ok', bqs()} |
                           {'shutdown', any(), bqs()} |
-                          {'sync_died', any(), bqs()}).
--spec(slave/7 :: (non_neg_integer(), reference(), timer:tref(), pid(),
+                          {'sync_died', any(), bqs()}.
+-spec slave(non_neg_integer(), reference(), timer:tref(), pid(),
                   bq(), bqs(), fun((bq(), bqs()) -> {timer:tref(), bqs()})) ->
                       'denied' |
                       {'ok' | 'failed', slave_sync_state()} |
-                      {'stop', any(), slave_sync_state()}).
-
--endif.
+                      {'stop', any(), slave_sync_state()}.
 
 %% ---------------------------------------------------------------------------
 %% Master
index afd0508aac2d59011987a46a0327240b0703d25c..596eb62b031073d43067babc5df91c817cd005cb 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([node_type/0, cluster_status/0]).
 
--type(node_type() :: disc | ram).
--type(cluster_status() :: {[node()], [node()], [node()]}).
+-type node_type() :: disc | ram.
+-type cluster_status() :: {[node()], [node()], [node()]}.
 
 %% Main interface
--spec(init/0 :: () -> 'ok').
--spec(join_cluster/2 :: (node(), node_type())
-                        -> 'ok' | {'ok', 'already_member'}).
--spec(reset/0 :: () -> 'ok').
--spec(force_reset/0 :: () -> 'ok').
--spec(update_cluster_nodes/1 :: (node()) -> 'ok').
--spec(change_cluster_node_type/1 :: (node_type()) -> 'ok').
--spec(forget_cluster_node/2 :: (node(), boolean()) -> 'ok').
--spec(force_load_next_boot/0 :: () -> 'ok').
+-spec init() -> 'ok'.
+-spec join_cluster(node(), node_type())
+                        -> ok | {ok, already_member} | {error, {inconsistent_cluster, string()}}.
+-spec reset() -> 'ok'.
+-spec force_reset() -> 'ok'.
+-spec update_cluster_nodes(node()) -> 'ok'.
+-spec change_cluster_node_type(node_type()) -> 'ok'.
+-spec forget_cluster_node(node(), boolean()) -> 'ok'.
+-spec force_load_next_boot() -> 'ok'.
 
 %% Various queries to get the status of the db
--spec(status/0 :: () -> [{'nodes', [{node_type(), [node()]}]} |
+-spec status() -> [{'nodes', [{node_type(), [node()]}]} |
                          {'running_nodes', [node()]} |
-                         {'partitions', [{node(), [node()]}]}]).
--spec(is_clustered/0 :: () -> boolean()).
--spec(on_running_node/1 :: (pid()) -> boolean()).
--spec(is_process_alive/1 :: (pid()) -> boolean()).
--spec(cluster_nodes/1 :: ('all' | 'disc' | 'ram' | 'running') -> [node()]).
--spec(node_type/0 :: () -> node_type()).
--spec(dir/0 :: () -> file:filename()).
--spec(cluster_status_from_mnesia/0 :: () -> rabbit_types:ok_or_error2(
-                                              cluster_status(), any())).
+                         {'partitions', [{node(), [node()]}]}].
+-spec is_clustered() -> boolean().
+-spec on_running_node(pid()) -> boolean().
+-spec is_process_alive(pid()) -> boolean().
+-spec cluster_nodes('all' | 'disc' | 'ram' | 'running') -> [node()].
+-spec node_type() -> node_type().
+-spec dir() -> file:filename().
+-spec cluster_status_from_mnesia() -> rabbit_types:ok_or_error2(
+                                              cluster_status(), any()).
 
 %% Operations on the db and utils, mainly used in `rabbit_upgrade' and `rabbit'
--spec(init_db_unchecked/2 :: ([node()], node_type()) -> 'ok').
--spec(copy_db/1 :: (file:filename()) ->  rabbit_types:ok_or_error(any())).
--spec(check_cluster_consistency/0 :: () -> 'ok').
--spec(ensure_mnesia_dir/0 :: () -> 'ok').
+-spec init_db_unchecked([node()], node_type()) -> 'ok'.
+-spec copy_db(file:filename()) ->  rabbit_types:ok_or_error(any()).
+-spec check_cluster_consistency() -> 'ok'.
+-spec ensure_mnesia_dir() -> 'ok'.
 
 %% Hooks used in `rabbit_node_monitor'
--spec(on_node_up/1 :: (node()) -> 'ok').
--spec(on_node_down/1 :: (node()) -> 'ok').
-
--endif.
+-spec on_node_up(node()) -> 'ok'.
+-spec on_node_down(node()) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 %% Main interface
@@ -204,8 +200,17 @@ join_cluster(DiscoveryNode, NodeType) ->
                     {error, Reason}
             end;
         true ->
-            rabbit_log:info("Already member of cluster: ~p~n", [ClusterNodes]),
-            {ok, already_member}
+            %% DiscoveryNode thinks that we are part of a cluster, but
+            %% do we think so ourselves?
+            case are_we_clustered_with(DiscoveryNode) of
+                true ->
+                    rabbit_log:info("Asked to join a cluster but already a member of it: ~p~n", [ClusterNodes]),
+                    {ok, already_member};
+                false ->
+                    Msg = format_inconsistent_cluster_message(DiscoveryNode, node()),
+                    rabbit_log:error(Msg),
+                    {error, {inconsistent_cluster, Msg}}
+            end
     end.
 
 %% return node to its virgin state, where it is not member of any
@@ -423,6 +428,7 @@ cluster_status(WhichNodes) ->
 
 node_info() ->
     {rabbit_misc:otp_release(), rabbit_misc:version(),
+     mnesia:system_info(protocol_version),
      cluster_status_from_mnesia()}.
 
 node_type() ->
@@ -593,26 +599,37 @@ check_cluster_consistency() ->
     end.
 
 check_cluster_consistency(Node, CheckNodesConsistency) ->
-    case rpc:call(Node, rabbit_mnesia, node_info, []) of
+    case remote_node_info(Node) of
         {badrpc, _Reason} ->
             {error, not_found};
-        {_OTP, _Rabbit, {error, _}} ->
+        {_OTP, Rabbit, DelegateModuleHash, _Status} when is_binary(DelegateModuleHash) ->
+            %% when a delegate module .beam file hash is present
+            %% in the tuple, we are dealing with an old version
+            rabbit_version:version_error("Rabbit", rabbit_misc:version(), Rabbit);
+        {_OTP, _Rabbit, _Protocol, {error, _}} ->
             {error, not_found};
-        {OTP, Rabbit, {ok, Status}} when CheckNodesConsistency ->
-            case check_consistency(OTP, Rabbit, Node, Status) of
+        {OTP, Rabbit, Protocol, {ok, Status}} when CheckNodesConsistency ->
+            case check_consistency(Node, OTP, Rabbit, Protocol, Status) of
                 {error, _} = E -> E;
                 {ok, Res}      -> {ok, Res}
             end;
-        {OTP, Rabbit, {ok, Status}} ->
-            case check_consistency(OTP, Rabbit) of
+        {OTP, Rabbit, Protocol, {ok, Status}} ->
+            case check_consistency(Node, OTP, Rabbit, Protocol) of
                 {error, _} = E -> E;
                 ok             -> {ok, Status}
-            end;
-        {_OTP, Rabbit, _Hash, _Status} ->
-            %% delegate hash checking implies version mismatch
-            rabbit_version:version_error("Rabbit", rabbit_misc:version(), Rabbit)
+            end
     end.
 
+remote_node_info(Node) ->
+    case rpc:call(Node, rabbit_mnesia, node_info, []) of
+        {badrpc, _} = Error   -> Error;
+        %% RabbitMQ prior to 3.6.2
+        {OTP, Rabbit, Status} -> {OTP, Rabbit, unsupported, Status};
+        %% RabbitMQ 3.6.2 or later
+        {OTP, Rabbit, Protocol, Status} -> {OTP, Rabbit, Protocol, Status}
+    end.
+
+
 %%--------------------------------------------------------------------
 %% Hooks for `rabbit_node_monitor'
 %%--------------------------------------------------------------------
@@ -763,14 +780,14 @@ change_extra_db_nodes(ClusterNodes0, CheckOtherNodes) ->
             Nodes
     end.
 
-check_consistency(OTP, Rabbit) ->
+check_consistency(Node, OTP, Rabbit, ProtocolVersion) ->
     rabbit_misc:sequence_error(
-      [rabbit_version:check_otp_consistency(OTP),
+      [check_mnesia_or_otp_consistency(Node, ProtocolVersion, OTP),
        check_rabbit_consistency(Rabbit)]).
 
-check_consistency(OTP, Rabbit, Node, Status) ->
+check_consistency(Node, OTP, Rabbit, ProtocolVersion, Status) ->
     rabbit_misc:sequence_error(
-      [rabbit_version:check_otp_consistency(OTP),
+      [check_mnesia_or_otp_consistency(Node, ProtocolVersion, OTP),
        check_rabbit_consistency(Rabbit),
        check_nodes_consistency(Node, Status)]).
 
@@ -780,9 +797,56 @@ check_nodes_consistency(Node, RemoteStatus = {RemoteAllNodes, _, _}) ->
             {ok, RemoteStatus};
         false ->
             {error, {inconsistent_cluster,
-                     rabbit_misc:format("Node ~p thinks it's clustered "
-                                        "with node ~p, but ~p disagrees",
-                                        [node(), Node, Node])}}
+                     format_inconsistent_cluster_message(node(), Node)}}
+    end.
+
+check_mnesia_or_otp_consistency(_Node, unsupported, OTP) ->
+    rabbit_version:check_otp_consistency(OTP);
+check_mnesia_or_otp_consistency(Node, ProtocolVersion, _) ->
+    check_mnesia_consistency(Node, ProtocolVersion).
+
+check_mnesia_consistency(Node, ProtocolVersion) ->
+    % If mnesia is running we will just check protocol version
+    % If it's not running, we don't want it to join cluster until all checks pass
+    % so we start it without `dir` env variable to prevent
+    % joining cluster and/or corrupting data
+    with_running_or_clean_mnesia(fun() ->
+        case negotiate_protocol([Node]) of
+            [Node] -> ok;
+            []     ->
+                LocalVersion = mnesia:system_info(protocol_version),
+                {error, {inconsistent_cluster,
+                         rabbit_misc:format("Mnesia protocol negotiation failed."
+                                            " Local version: ~p."
+                                            " Remote version ~p",
+                                            [LocalVersion, ProtocolVersion])}}
+        end
+    end).
+
+negotiate_protocol([Node]) ->
+    mnesia_monitor:negotiate_protocol([Node]).
+
+with_running_or_clean_mnesia(Fun) ->
+    IsMnesiaRunning = case mnesia:system_info(is_running) of
+        yes      -> true;
+        no       -> false;
+        stopping ->
+            ensure_mnesia_not_running(),
+            false;
+        starting ->
+            ensure_mnesia_running(),
+            true
+    end,
+    case IsMnesiaRunning of
+        true  -> Fun();
+        false ->
+            {ok, MnesiaDir} = application:get_env(mnesia, dir),
+            application:unset_env(mnesia, dir),
+            mnesia:start(),
+            Result = Fun(),
+            application:stop(mnesia),
+            application:set_env(mnesia, dir, MnesiaDir),
+            Result
     end.
 
 check_rabbit_consistency(Remote) ->
@@ -819,22 +883,29 @@ find_auto_cluster_node([Node | Nodes]) ->
                      "Could not auto-cluster with ~s: " ++ Fmt, [Node | Args]),
                    find_auto_cluster_node(Nodes)
            end,
-    case rpc:call(Node, rabbit_mnesia, node_info, []) of
-        {badrpc, _} = Reason         -> Fail("~p~n", [Reason]);
+    case remote_node_info(Node) of
+        {badrpc, _} = Reason ->
+            Fail("~p~n", [Reason]);
         %% old delegate hash check
-        {_OTP, RMQ, _Hash, _}        -> Fail("version ~s~n", [RMQ]);
-        {_OTP, _RMQ, {error, _} = E} -> Fail("~p~n", [E]);
-        {OTP, RMQ, _}                -> case check_consistency(OTP, RMQ) of
-                                            {error, _} -> Fail("versions ~p~n",
-                                                               [{OTP, RMQ}]);
-                                            ok         -> {ok, Node}
-                                        end
+        {_OTP, RMQ, Hash, _} when is_binary(Hash) ->
+            Fail("version ~s~n", [RMQ]);
+        {_OTP, _RMQ, _Protocol, {error, _} = E} ->
+            Fail("~p~n", [E]);
+        {OTP, RMQ, Protocol, _} ->
+            case check_consistency(Node, OTP, RMQ, Protocol) of
+                {error, _} -> Fail("versions ~p~n",
+                                   [{OTP, RMQ}]);
+                ok         -> {ok, Node}
+            end
     end.
 
 is_only_clustered_disc_node() ->
     node_type() =:= disc andalso is_clustered() andalso
         cluster_nodes(disc) =:= [node()].
 
+are_we_clustered_with(Node) ->
+    lists:member(Node, mnesia_lib:all_nodes()).
+
 me_in_nodes(Nodes) -> lists:member(node(), Nodes).
 
 nodes_incl_me(Nodes) -> lists:usort([node()|Nodes]).
@@ -885,3 +956,8 @@ error_description(removing_node_from_offline_node) ->
         "from must be a disc node and all the other nodes must be offline.";
 error_description(no_running_cluster_nodes) ->
     "You cannot leave a cluster if no online nodes are present.".
+
+format_inconsistent_cluster_message(Thinker, Dissident) ->
+    rabbit_misc:format("Node ~p thinks it's clustered "
+                       "with node ~p, but ~p disagrees",
+                       [Thinker, Dissident, Dissident]).
index 1ece103cec8ac19ddc99a321f960e397f66b8ee3..0945e31522341c961e4a4ca69df5a38f46aa7ac2 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(rename/2 :: (node(), [{node(), node()}]) -> 'ok').
--spec(maybe_finish/1 :: ([node()]) -> 'ok').
-
--endif.
+-spec rename(node(), [{node(), node()}]) -> 'ok'.
+-spec maybe_finish([node()]) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index 09a2407bec870c19adbcef63ebf8f9c86c258ef7..5c0acc5ffd903f53da74f11ba76d304c4b2d4f0f 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--type(io_device() :: any()).
--type(position() :: non_neg_integer()).
--type(msg_size() :: non_neg_integer()).
--type(file_size() :: non_neg_integer()).
--type(message_accumulator(A) ::
+-type io_device() :: any().
+-type position() :: non_neg_integer().
+-type msg_size() :: non_neg_integer().
+-type file_size() :: non_neg_integer().
+-type message_accumulator(A) ::
         fun (({rabbit_types:msg_id(), msg_size(), position(), binary()}, A) ->
-                    A)).
-
--spec(append/3 :: (io_device(), rabbit_types:msg_id(), msg()) ->
-                       rabbit_types:ok_or_error2(msg_size(), any())).
--spec(read/2 :: (io_device(), msg_size()) ->
-                     rabbit_types:ok_or_error2({rabbit_types:msg_id(), msg()},
-                                               any())).
--spec(scan/4 :: (io_device(), file_size(), message_accumulator(A), A) ->
-                     {'ok', A, position()}).
-
--endif.
+            A).
+
+-spec append(io_device(), rabbit_types:msg_id(), msg()) ->
+          rabbit_types:ok_or_error2(msg_size(), any()).
+-spec read(io_device(), msg_size()) ->
+          rabbit_types:ok_or_error2({rabbit_types:msg_id(), msg()},
+                                    any()).
+-spec scan(io_device(), file_size(), message_accumulator(A), A) ->
+          {'ok', A, position()}.
 
 %%----------------------------------------------------------------------------
 
index 6754c606bbf433bff871e9d96ab42cb65a158d22..d3ff077c8ba456179e6e8f1c309de251c00faa03 100644 (file)
@@ -91,6 +91,8 @@
           flying_ets,
           %% set of dying clients
           dying_clients,
+          %% index of file positions for client death messages
+          dying_client_index,
           %% map of references of all registered clients
           %% to callbacks
           clients,
           msg_store
         }).
 
-%%----------------------------------------------------------------------------
+-record(dying_client,
+        { client_ref,
+          file,
+          offset
+        }).
 
--ifdef(use_specs).
+%%----------------------------------------------------------------------------
 
 -export_type([gc_state/0, file_num/0]).
 
--type(gc_state() :: #gc_state { dir              :: file:filename(),
+-type gc_state() :: #gc_state { dir              :: file:filename(),
                                 index_module     :: atom(),
                                 index_state      :: any(),
                                 file_summary_ets :: ets:tid(),
                                 file_handles_ets :: ets:tid(),
                                 msg_store        :: server()
-                              }).
+                              }.
 
--type(server() :: pid() | atom()).
--type(client_ref() :: binary()).
--type(file_num() :: non_neg_integer()).
--type(client_msstate() :: #client_msstate {
+-type server() :: pid() | atom().
+-type client_ref() :: binary().
+-type file_num() :: non_neg_integer().
+-type client_msstate() :: #client_msstate {
                       server             :: server(),
                       client_ref         :: client_ref(),
-                      file_handle_cache  :: dict:dict(),
+                      file_handle_cache  :: ?DICT_TYPE(),
                       index_state        :: any(),
                       index_module       :: atom(),
                       dir                :: file:filename(),
                       file_summary_ets   :: ets:tid(),
                       cur_file_cache_ets :: ets:tid(),
                       flying_ets         :: ets:tid(),
-                      credit_disc_bound  :: {pos_integer(), pos_integer()}}).
--type(msg_ref_delta_gen(A) ::
+                      credit_disc_bound  :: {pos_integer(), pos_integer()}}.
+-type msg_ref_delta_gen(A) ::
         fun ((A) -> 'finished' |
-                    {rabbit_types:msg_id(), non_neg_integer(), A})).
--type(maybe_msg_id_fun() ::
-        'undefined' | fun ((gb_sets:set(), 'written' | 'ignored') -> any())).
--type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')).
--type(deletion_thunk() :: fun (() -> boolean())).
+                    {rabbit_types:msg_id(), non_neg_integer(), A}).
+-type maybe_msg_id_fun() ::
+        'undefined' | fun ((?GB_SET_TYPE(), 'written' | 'ignored') -> any()).
+-type maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok').
+-type deletion_thunk() :: fun (() -> boolean()).
 
--spec(start_link/4 ::
+-spec start_link
         (atom(), file:filename(), [binary()] | 'undefined',
-         {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error()).
--spec(successfully_recovered_state/1 :: (server()) -> boolean()).
--spec(client_init/4 :: (server(), client_ref(), maybe_msg_id_fun(),
-                        maybe_close_fds_fun()) -> client_msstate()).
--spec(client_terminate/1 :: (client_msstate()) -> 'ok').
--spec(client_delete_and_terminate/1 :: (client_msstate()) -> 'ok').
--spec(client_ref/1 :: (client_msstate()) -> client_ref()).
--spec(close_all_indicated/1 ::
-        (client_msstate()) -> rabbit_types:ok(client_msstate())).
--spec(write/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok').
--spec(write_flow/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok').
--spec(read/2 :: (rabbit_types:msg_id(), client_msstate()) ->
-                     {rabbit_types:ok(msg()) | 'not_found', client_msstate()}).
--spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()).
--spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok').
-
--spec(set_maximum_since_use/2 :: (server(), non_neg_integer()) -> 'ok').
--spec(has_readers/2 :: (non_neg_integer(), gc_state()) -> boolean()).
--spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) ->
-                              deletion_thunk()).
--spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()).
--spec(force_recovery/2 :: (file:filename(), server()) -> 'ok').
--spec(transform_dir/3 :: (file:filename(), server(),
-        fun ((any()) -> (rabbit_types:ok_or_error2(msg(), any())))) -> 'ok').
-
--endif.
+         {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error().
+-spec successfully_recovered_state(server()) -> boolean().
+-spec client_init(server(), client_ref(), maybe_msg_id_fun(),
+                        maybe_close_fds_fun()) -> client_msstate().
+-spec client_terminate(client_msstate()) -> 'ok'.
+-spec client_delete_and_terminate(client_msstate()) -> 'ok'.
+-spec client_ref(client_msstate()) -> client_ref().
+-spec close_all_indicated
+        (client_msstate()) -> rabbit_types:ok(client_msstate()).
+-spec write(rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'.
+-spec write_flow(rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok'.
+-spec read(rabbit_types:msg_id(), client_msstate()) ->
+                     {rabbit_types:ok(msg()) | 'not_found', client_msstate()}.
+-spec contains(rabbit_types:msg_id(), client_msstate()) -> boolean().
+-spec remove([rabbit_types:msg_id()], client_msstate()) -> 'ok'.
+
+-spec set_maximum_since_use(server(), non_neg_integer()) -> 'ok'.
+-spec has_readers(non_neg_integer(), gc_state()) -> boolean().
+-spec combine_files(non_neg_integer(), non_neg_integer(), gc_state()) ->
+                              deletion_thunk().
+-spec delete_file(non_neg_integer(), gc_state()) -> deletion_thunk().
+-spec force_recovery(file:filename(), server()) -> 'ok'.
+-spec transform_dir(file:filename(), server(),
+        fun ((any()) -> (rabbit_types:ok_or_error2(msg(), any())))) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
 %% performance with many healthy clients and few, if any, dying
 %% clients, which is the typical case.
 %%
+%% Client termination messages are stored in a separate ets index to
+%% avoid filling primary message store index and message files with
+%% client termination messages.
+%%
 %% When the msg_store has a backlog (i.e. it has unprocessed messages
 %% in its mailbox / gen_server priority queue), a further optimisation
 %% opportunity arises: we can eliminate pairs of 'write' and 'remove'
@@ -691,7 +699,9 @@ client_update_flying(Diff, MsgId, #client_msstate { flying_ets = FlyingEts,
     end.
 
 clear_client(CRef, State = #msstate { cref_to_msg_ids = CTM,
-                                      dying_clients = DyingClients }) ->
+                                      dying_clients = DyingClients,
+                                      dying_client_index = DyingIndex }) ->
+    ets:delete(DyingIndex, CRef),
     State #msstate { cref_to_msg_ids = dict:erase(CRef, CTM),
                      dying_clients = sets:del_element(CRef, DyingClients) }.
 
@@ -745,6 +755,8 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) ->
                               [ordered_set, public]),
     CurFileCacheEts = ets:new(rabbit_msg_store_cur_file, [set, public]),
     FlyingEts       = ets:new(rabbit_msg_store_flying, [set, public]),
+    DyingIndex      = ets:new(rabbit_msg_store_dying_client_index,
+                              [set, public, {keypos, #dying_client.client_ref}]),
 
     {ok, FileSizeLimit} = application:get_env(msg_store_file_size_limit),
 
@@ -776,6 +788,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) ->
                        cur_file_cache_ets     = CurFileCacheEts,
                        flying_ets             = FlyingEts,
                        dying_clients          = sets:new(),
+                       dying_client_index     = DyingIndex,
                        clients                = Clients,
                        successfully_recovered = CleanShutdown,
                        file_size_limit        = FileSizeLimit,
@@ -852,15 +865,21 @@ handle_call({contains, MsgId}, From, State) ->
     noreply(State1).
 
 handle_cast({client_dying, CRef},
-            State = #msstate { dying_clients = DyingClients }) ->
+            State = #msstate { dying_clients       = DyingClients,
+                               dying_client_index  = DyingIndex,
+                               current_file_handle = CurHdl,
+                               current_file        = CurFile }) ->
     DyingClients1 = sets:add_element(CRef, DyingClients),
-    noreply(write_message(CRef, <<>>,
-                          State #msstate { dying_clients = DyingClients1 }));
+    {ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl),
+    true = ets:insert_new(DyingIndex, #dying_client{client_ref = CRef,
+                                                    file = CurFile,
+                                                    offset = CurOffset}),
+    noreply(State #msstate { dying_clients = DyingClients1 });
 
 handle_cast({client_delete, CRef},
             State = #msstate { clients = Clients }) ->
     State1 = State #msstate { clients = dict:erase(CRef, Clients) },
-    noreply(remove_message(CRef, CRef, clear_client(CRef, State1)));
+    noreply(clear_client(CRef, State1));
 
 handle_cast({write, CRef, MsgId, Flow},
             State = #msstate { cur_file_cache_ets = CurFileCacheEts,
@@ -1338,7 +1357,8 @@ blind_confirm(CRef, MsgIds, ActionTaken, State) ->
 %% msg and thus should be ignored. Note that this (correctly) returns
 %% false when testing to remove the death msg itself.
 should_mask_action(CRef, MsgId,
-                   State = #msstate { dying_clients = DyingClients }) ->
+                   State = #msstate { dying_clients = DyingClients,
+                                      dying_client_index = DyingIndex }) ->
     case {sets:is_element(CRef, DyingClients), index_lookup(MsgId, State)} of
         {false, Location} ->
             {false, Location};
@@ -1346,8 +1366,8 @@ should_mask_action(CRef, MsgId,
             {true, not_found};
         {true, #msg_location { file = File, offset = Offset,
                                ref_count = RefCount } = Location} ->
-            #msg_location { file = DeathFile, offset = DeathOffset } =
-                index_lookup(CRef, State),
+            [#dying_client { file = DeathFile, offset = DeathOffset }] =
+                ets:lookup(DyingIndex, CRef),
             {case {{DeathFile, DeathOffset} < {File, Offset}, RefCount} of
                  {true,  _} -> true;
                  {false, 0} -> false_if_increment;
@@ -1360,9 +1380,10 @@ should_mask_action(CRef, MsgId,
 %%----------------------------------------------------------------------------
 
 open_file(Dir, FileName, Mode) ->
-    file_handle_cache:open(form_filename(Dir, FileName), ?BINARY_MODE ++ Mode,
-                           [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE},
-                            {read_buffer,  ?HANDLE_CACHE_BUFFER_SIZE}]).
+    file_handle_cache:open_with_absolute_path(
+      form_filename(Dir, FileName), ?BINARY_MODE ++ Mode,
+      [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE},
+       {read_buffer,  ?HANDLE_CACHE_BUFFER_SIZE}]).
 
 close_handle(Key, CState = #client_msstate { file_handle_cache = FHC }) ->
     CState #client_msstate { file_handle_cache = close_handle(Key, FHC) };
@@ -2112,10 +2133,11 @@ transform_dir(BaseDir, Store, TransformFun) ->
 
 transform_msg_file(FileOld, FileNew, TransformFun) ->
     ok = rabbit_file:ensure_parent_dirs_exist(FileNew),
-    {ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []),
-    {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write],
-                                          [{write_buffer,
-                                            ?HANDLE_CACHE_BUFFER_SIZE}]),
+    {ok, RefOld} = file_handle_cache:open_with_absolute_path(
+                     FileOld, [raw, binary, read], []),
+    {ok, RefNew} = file_handle_cache:open_with_absolute_path(
+                     FileNew, [raw, binary, write],
+                     [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]),
     {ok, _Acc, _IgnoreSize} =
         rabbit_msg_file:scan(
           RefOld, filelib:file_size(FileOld),
index b27aaf4c8485084b1e0361e2fbfc486891aee9b0..9cfdba8a8d5790a5bc543fd48dbfaf4d9a95958b 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/1 :: (rabbit_msg_store:gc_state()) ->
-                           rabbit_types:ok_pid_or_error()).
--spec(combine/3 :: (pid(), rabbit_msg_store:file_num(),
-                    rabbit_msg_store:file_num()) -> 'ok').
--spec(delete/2 :: (pid(), rabbit_msg_store:file_num()) -> 'ok').
--spec(no_readers/2 :: (pid(), rabbit_msg_store:file_num()) -> 'ok').
--spec(stop/1 :: (pid()) -> 'ok').
--spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
-
--endif.
+-spec start_link(rabbit_msg_store:gc_state()) ->
+                           rabbit_types:ok_pid_or_error().
+-spec combine(pid(), rabbit_msg_store:file_num(),
+                    rabbit_msg_store:file_num()) -> 'ok'.
+-spec delete(pid(), rabbit_msg_store:file_num()) -> 'ok'.
+-spec no_readers(pid(), rabbit_msg_store:file_num()) -> 'ok'.
+-spec stop(pid()) -> 'ok'.
+-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index 6f41836b98ee2a38fb2c216e4be3731bb10219ad..0322aacfd1511cf6f65d6689da83bdaae1da9b48 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
-
--spec(running_nodes_filename/0 :: () -> string()).
--spec(cluster_status_filename/0 :: () -> string()).
--spec(prepare_cluster_status_files/0 :: () -> 'ok').
--spec(write_cluster_status/1 :: (rabbit_mnesia:cluster_status()) -> 'ok').
--spec(read_cluster_status/0 :: () -> rabbit_mnesia:cluster_status()).
--spec(update_cluster_status/0 :: () -> 'ok').
--spec(reset_cluster_status/0 :: () -> 'ok').
-
--spec(notify_node_up/0 :: () -> 'ok').
--spec(notify_joined_cluster/0 :: () -> 'ok').
--spec(notify_left_cluster/1 :: (node()) -> 'ok').
-
--spec(partitions/0 :: () -> [node()]).
--spec(partitions/1 :: ([node()]) -> [{node(), [node()]}]).
--spec(status/1 :: ([node()]) -> {[{node(), [node()]}], [node()]}).
--spec(subscribe/1 :: (pid()) -> 'ok').
--spec(pause_partition_guard/0 :: () -> 'ok' | 'pausing').
-
--spec(all_rabbit_nodes_up/0 :: () -> boolean()).
--spec(run_outside_applications/2 :: (fun (() -> any()), boolean()) -> pid()).
--spec(ping_all/0 :: () -> 'ok').
--spec(alive_nodes/1 :: ([node()]) -> [node()]).
--spec(alive_rabbit_nodes/1 :: ([node()]) -> [node()]).
-
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+
+-spec running_nodes_filename() -> string().
+-spec cluster_status_filename() -> string().
+-spec prepare_cluster_status_files() -> 'ok'.
+-spec write_cluster_status(rabbit_mnesia:cluster_status()) -> 'ok'.
+-spec read_cluster_status() -> rabbit_mnesia:cluster_status().
+-spec update_cluster_status() -> 'ok'.
+-spec reset_cluster_status() -> 'ok'.
+
+-spec notify_node_up() -> 'ok'.
+-spec notify_joined_cluster() -> 'ok'.
+-spec notify_left_cluster(node()) -> 'ok'.
+
+-spec partitions() -> [node()].
+-spec partitions([node()]) -> [{node(), [node()]}].
+-spec status([node()]) -> {[{node(), [node()]}], [node()]}.
+-spec subscribe(pid()) -> 'ok'.
+-spec pause_partition_guard() -> 'ok' | 'pausing'.
+
+-spec all_rabbit_nodes_up() -> boolean().
+-spec run_outside_applications(fun (() -> any()), boolean()) -> pid().
+-spec ping_all() -> 'ok'.
+-spec alive_nodes([node()]) -> [node()].
+-spec alive_rabbit_nodes([node()]) -> [node()].
 
 %%----------------------------------------------------------------------------
 %% Start
@@ -414,7 +410,12 @@ handle_cast({check_partial_partition, Node, Rep, NodeGUID, MyGUID, RepGUID},
                    fun () ->
                            case rpc:call(Node, rabbit, is_running, []) of
                                {badrpc, _} -> ok;
-                               _           -> cast(Rep, {partial_partition,
+                               _           ->
+                                  rabbit_log:warning("Received a 'DOWN' message"
+                                                     " from ~p but still can"
+                                                     " communicate with it ~n",
+                                                     [Node]),
+                                  cast(Rep, {partial_partition,
                                                          Node, node(), RepGUID})
                            end
                    end);
index d5b0945de97786c2e9112110e9450a19a3012b87..b7987df1d806ef50bb0f4f4d52ba30fa249f90f1 100644 (file)
@@ -35,10 +35,7 @@ hash(HashingMod, Cleartext) ->
     <<SaltBin/binary, Hash/binary>>.
 
 generate_salt() ->
-    random:seed(erlang:phash2([node()]),
-        time_compat:monotonic_time(),
-        time_compat:unique_integer()),
-    Salt = random:uniform(16#ffffffff),
+    Salt = rand_compat:uniform(16#ffffffff),
     <<Salt:32>>.
 
 salted_hash(Salt, Cleartext) ->
index c7f5d501bf232c0fd61f72efaed1a1787b6b950f..4d8966f7e27adadcd37fd50da7fcc4096b2f9712 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--type(plugin_name() :: atom()).
-
--spec(setup/0 :: () -> [plugin_name()]).
--spec(active/0 :: () -> [plugin_name()]).
--spec(list/1 :: (string()) -> [#plugin{}]).
--spec(list/2 :: (string(), boolean()) -> [#plugin{}]).
--spec(read_enabled/1 :: (file:filename()) -> [plugin_name()]).
--spec(dependencies/3 :: (boolean(), [plugin_name()], [#plugin{}]) ->
-                             [plugin_name()]).
--spec(ensure/1  :: (string()) -> {'ok', [atom()], [atom()]} | {error, any()}).
--endif.
+-type plugin_name() :: atom().
+
+-spec setup() -> [plugin_name()].
+-spec active() -> [plugin_name()].
+-spec list(string()) -> [#plugin{}].
+-spec list(string(), boolean()) -> [#plugin{}].
+-spec read_enabled(file:filename()) -> [plugin_name()].
+-spec dependencies(boolean(), [plugin_name()], [#plugin{}]) ->
+                             [plugin_name()].
+-spec ensure(string()) -> {'ok', [atom()], [atom()]} | {error, any()}.
 
 %%----------------------------------------------------------------------------
 
index 4aeed4826c17cc90bf30da357a78688a2964a390..ff516268c6e58e2e76c90802027b8c7407bf6acc 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start/0 :: () -> no_return()).
--spec(stop/0 :: () -> 'ok').
-
--endif.
+-spec start() -> no_return().
+-spec stop() -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
@@ -173,7 +169,7 @@ format_plugins(Node, Pattern, Opts, #cli{all      = All,
 
     EnabledImplicitly = Implicit -- Enabled,
     {StatusMsg, Running} =
-        case rabbit_cli:rpc_call(Node, rabbit_plugins, active, []) of
+        case rabbit_misc:rpc_call(Node, rabbit_plugins, active, []) of
             {badrpc, _} -> {"[failed to contact ~s - status not shown]", []};
             Active      -> {"* = running on ~s", Active}
         end,
@@ -279,7 +275,7 @@ sync(Node, ForceOnline, #cli{file = File}) ->
 
 rpc_call(Node, Online, Mod, Fun, Args) ->
     io:format("~nApplying plugin configuration to ~s...", [Node]),
-    case rabbit_cli:rpc_call(Node, Mod, Fun, Args) of
+    case rabbit_misc:rpc_call(Node, Mod, Fun, Args) of
         {ok, [], []} ->
             io:format(" nothing to do.~n", []);
         {ok, Start, []} ->
index 1f7e521dfddeed7a085bf7530093b8285c681188..eb8cf6332737049c6a2ce609593cb1e38d68f2b9 100644 (file)
@@ -221,11 +221,11 @@ validate(_VHost, <<"policy">>, Name, Term, _User) ->
       Name, policy_validation(), Term).
 
 notify(VHost, <<"policy">>, Name, Term) ->
-    rabbit_event:notify(policy_set, [{name, Name} | Term]),
+    rabbit_event:notify(policy_set, [{name, Name}, {vhost, VHost} | Term]),
     update_policies(VHost).
 
 notify_clear(VHost, <<"policy">>, Name) ->
-    rabbit_event:notify(policy_cleared, [{name, Name}]),
+    rabbit_event:notify(policy_cleared, [{name, Name}, {vhost, VHost}]),
     update_policies(VHost).
 
 %%----------------------------------------------------------------------------
@@ -242,8 +242,10 @@ update_policies(VHost) ->
                  fun() ->
                          [mnesia:lock({table, T}, write) || T <- Tabs], %% [1]
                          case catch list(VHost) of
-                             {error, {no_such_vhost, _}} ->
-                                 ok; %% [2]
+                             {'EXIT', {throw, {error, {no_such_vhost, _}}}} ->
+                                 {[], []}; %% [2]
+                             {'EXIT', Exit} ->
+                                 exit(Exit);
                              Policies ->
                                  {[update_exchange(X, Policies) ||
                                       X <- rabbit_exchange:list(VHost)],
index 5ecdd75acc4277549ebdf8e9e9345c252e282f17..569a8d6c5a082eea048f08d0093fb3a5168e0a35 100644 (file)
 %% Specs
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start/0 :: () -> no_return()).
--spec(stop/0 :: () -> 'ok').
-
--endif.
+-spec start() -> no_return().
+-spec stop() -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
@@ -129,10 +125,9 @@ dist_port_use_check_ipv6(NodeHost, Port) ->
        {error, _} -> dist_port_use_check_fail(Port, NodeHost)
     end.
 
--ifdef(use_specs).
--spec(dist_port_use_check_fail/2 :: (non_neg_integer(), string()) ->
-                                         no_return()).
--endif.
+-spec dist_port_use_check_fail(non_neg_integer(), string()) ->
+                                         no_return().
+
 dist_port_use_check_fail(Port, Host) ->
     {ok, Names} = rabbit_nodes:names(Host),
     case [N || {N, P} <- Names, P =:= Port] of
index af96ea9f6fd2a45c69d3b0efa9509e0afc6707d3..5b2c24acab141bbbe66baaf37604582d4150bb19 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([start_mode/0]).
 
--type(start_mode() :: 'declare' | 'recovery' | 'slave').
-
--spec(start_link/3 :: (rabbit_types:amqqueue(), start_mode(), pid())
-                      -> rabbit_types:ok_pid_or_error()).
+-type start_mode() :: 'declare' | 'recovery' | 'slave'.
 
--endif.
+-spec start_link(rabbit_types:amqqueue(), start_mode(), pid())
+                      -> rabbit_types:ok_pid_or_error().
 
 %%----------------------------------------------------------------------------
 
index b58a8c535eb1da5b9f61034cff097715e83f3fb7..b7a3afd129561b32509785f70112b26229fa7243 100644 (file)
@@ -43,7 +43,7 @@
          info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
          zip_msgs_and_acks/4]).
 
--record(state, {bq, bqss}).
+-record(state, {bq, bqss, max_priority}).
 -record(passthrough, {bq, bqs}).
 
 %% See 'note on suffixes' below
@@ -157,7 +157,8 @@ init(Q, Recover, AsyncCallback) ->
                                     [{P, Init(P, Term)} || {P, Term} <- PsTerms]
                        end,
                 #state{bq   = BQ,
-                       bqss = BQSs}
+                       bqss = BQSs,
+                       max_priority = hd(Ps)}
     end.
 %% [0] collapse_recovery has the effect of making a list of recovery
 %% terms in priority order, even for non priority queues. It's easier
@@ -205,8 +206,8 @@ publish(Msg, MsgProps, IsDelivered, ChPid, Flow,
         State = #passthrough{bq = BQ, bqs = BQS}) ->
     ?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS)).
 
-batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ}) ->
-    PubDict = partition_publish_batch(Publishes),
+batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) ->
+    PubDict = partition_publish_batch(Publishes, MaxP),
     lists:foldl(
       fun ({Priority, Pubs}, St) ->
               pick1(fun (_P, BQSN) ->
@@ -227,8 +228,8 @@ publish_delivered(Msg, MsgProps, ChPid, Flow,
                   State = #passthrough{bq = BQ, bqs = BQS}) ->
     ?passthrough2(publish_delivered(Msg, MsgProps, ChPid, Flow, BQS)).
 
-batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ}) ->
-    PubDict = partition_publish_delivered_batch(Publishes),
+batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) ->
+    PubDict = partition_publish_delivered_batch(Publishes, MaxP),
     {PrioritiesAndAcks, State1} =
         lists:foldl(
           fun ({Priority, Pubs}, {PriosAndAcks, St}) ->
@@ -404,7 +405,6 @@ msg_rates(#state{bq = BQ, bqss = BQSs}) ->
           end, {0.0, 0.0}, BQSs);
 msg_rates(#passthrough{bq = BQ, bqs = BQS}) ->
     BQ:msg_rates(BQS).
-
 info(backing_queue_status, #state{bq = BQ, bqss = BQSs}) ->
     fold0(fun (P, BQSN, Acc) ->
                   combine_status(P, BQ:info(backing_queue_status, BQSN), Acc)
@@ -420,6 +420,8 @@ info(Item, #passthrough{bq = BQ, bqs = BQS}) ->
 
 invoke(Mod, {P, Fun}, State = #state{bq = BQ}) ->
     pick1(fun (_P, BQSN) -> BQ:invoke(Mod, Fun, BQSN) end, P, State);
+invoke(Mod, Fun, State = #state{bq = BQ, max_priority = P}) ->
+    pick1(fun (_P, BQSN) -> BQ:invoke(Mod, Fun, BQSN) end, P, State);
 invoke(Mod, Fun, State = #passthrough{bq = BQ, bqs = BQS}) ->
     ?passthrough1(invoke(Mod, Fun, BQS)).
 
@@ -433,8 +435,8 @@ set_queue_mode(Mode, State = #state{bq = BQ}) ->
 set_queue_mode(Mode, State = #passthrough{bq = BQ, bqs = BQS}) ->
     ?passthrough1(set_queue_mode(Mode, BQS)).
 
-zip_msgs_and_acks(Msgs, AckTags, Accumulator, #state{}) ->
-    MsgsByPriority = partition_publish_delivered_batch(Msgs),
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, #state{bqss = [{MaxP, _} |_]}) ->
+    MsgsByPriority = partition_publish_delivered_batch(Msgs, MaxP),
     lists:foldl(fun (Acks, MAs) ->
                         {P, _AckTag} = hd(Acks),
                         Pubs = orddict:fetch(P, MsgsByPriority),
@@ -484,13 +486,14 @@ foreach1(_Fun, [], BQSAcc) ->
 
 %% For a given thing, just go to its BQ
 pick1(Fun, Prioritisable, #state{bqss = BQSs} = State) ->
-    {P, BQSN} = priority(Prioritisable, BQSs),
+    {P, BQSN} = priority_bq(Prioritisable, BQSs),
     a(State#state{bqss = bq_store(P, Fun(P, BQSN), BQSs)}).
 
 %% Fold over results
 fold2(Fun, Acc, State = #state{bqss = BQSs}) ->
     {Res, BQSs1} = fold2(Fun, Acc, BQSs, []),
     {Res, a(State#state{bqss = BQSs1})}.
+
 fold2(Fun, Acc, [{P, BQSN} | Rest], BQSAcc) ->
     {Acc1, BQSN1} = Fun(P, BQSN, Acc),
     fold2(Fun, Acc1, Rest, [{P, BQSN1} | BQSAcc]);
@@ -532,7 +535,7 @@ fold_by_acktags2(Fun, AckTags, State) ->
 
 %% For a given thing, just go to its BQ
 pick2(Fun, Prioritisable, #state{bqss = BQSs} = State) ->
-    {P, BQSN} = priority(Prioritisable, BQSs),
+    {P, BQSN} = priority_bq(Prioritisable, BQSs),
     {Res, BQSN1} = Fun(P, BQSN),
     {Res, a(State#state{bqss = bq_store(P, BQSN1, BQSs)})}.
 
@@ -563,8 +566,8 @@ findfold3(Fun, Acc, NotFound, [{P, BQSN} | Rest], BQSAcc) ->
 findfold3(_Fun, Acc, NotFound, [], BQSAcc) ->
     {NotFound, Acc, lists:reverse(BQSAcc)}.
 
-bq_fetch(P, [])               -> exit({not_found, P});
-bq_fetch(P, [{P,  BQSN} | _]) -> BQSN;
+bq_fetch(P, []) -> exit({not_found, P});
+bq_fetch(P, [{P,  BQSN} | _]) -> {P, BQSN};
 bq_fetch(P, [{_, _BQSN} | T]) -> bq_fetch(P, T).
 
 bq_store(P, BQS, BQSs) ->
@@ -582,41 +585,41 @@ a(State = #state{bqss = BQSs}) ->
     end.
 
 %%----------------------------------------------------------------------------
-partition_publish_batch(Publishes) ->
+partition_publish_batch(Publishes, MaxP) ->
     partition_publishes(
-      Publishes, fun ({Msg, _, _}) -> Msg end).
+      Publishes, fun ({Msg, _, _}) -> Msg end, MaxP).
 
-partition_publish_delivered_batch(Publishes) ->
+partition_publish_delivered_batch(Publishes, MaxP) ->
     partition_publishes(
-      Publishes, fun ({Msg, _}) -> Msg end).
-
-partition_publishes(Publishes, ExtractMsg) ->
-    lists:foldl(fun (Pub, Dict) ->
-                        Msg = ExtractMsg(Pub),
-                        rabbit_misc:orddict_cons(priority2(Msg), Pub, Dict)
-                end, orddict:new(), Publishes).
-
-priority(P, BQSs) when is_integer(P) ->
-    {P, bq_fetch(P, BQSs)};
-priority(#basic_message{content = Content}, BQSs) ->
-    priority1(rabbit_binary_parser:ensure_content_decoded(Content), BQSs).
-
-priority1(_Content, [{P, BQSN}]) ->
-    {P, BQSN};
-priority1(Content, [{P, BQSN} | Rest]) ->
-    case priority2(Content) >= P of
-        true  -> {P, BQSN};
-        false -> priority1(Content, Rest)
-    end.
-
-priority2(#basic_message{content = Content}) ->
-    priority2(rabbit_binary_parser:ensure_content_decoded(Content));
-priority2(#content{properties = Props}) ->
+      Publishes, fun ({Msg, _}) -> Msg end, MaxP).
+
+partition_publishes(Publishes, ExtractMsg, MaxP) ->
+    Partitioned =
+        lists:foldl(fun (Pub, Dict) ->
+                            Msg = ExtractMsg(Pub),
+                            rabbit_misc:orddict_cons(priority(Msg, MaxP), Pub, Dict)
+                    end, orddict:new(), Publishes),
+    orddict:map(fun (_P, RevPubs) ->
+                        lists:reverse(RevPubs)
+                end, Partitioned).
+
+
+priority_bq(Priority, [{MaxP, _} | _] = BQSs) ->
+    bq_fetch(priority(Priority, MaxP), BQSs).
+
+%% Messages with a priority which is higher than the queue's maximum are treated
+%% as if they were published with the maximum priority.
+priority(undefined, _MaxP) ->
+    0;
+priority(Priority, MaxP) when is_integer(Priority), Priority =< MaxP ->
+    Priority;
+priority(Priority, MaxP) when is_integer(Priority), Priority > MaxP ->
+    MaxP;
+priority(#basic_message{content = Content}, MaxP) ->
+    priority(rabbit_binary_parser:ensure_content_decoded(Content), MaxP);
+priority(#content{properties = Props}, MaxP) ->
     #'P_basic'{priority = Priority0} = Props,
-    case Priority0 of
-        undefined                    -> 0;
-        _ when is_integer(Priority0) -> Priority0
-    end.
+    priority(Priority0, MaxP).
 
 add_maybe_infinity(infinity, _) -> infinity;
 add_maybe_infinity(_, infinity) -> infinity;
index 5b5c9b30744a05b6172ffb2c0358817e983ae8ac..a8002398e7c5f85e5697990fd35a0087844b8ad6 100644 (file)
@@ -49,8 +49,6 @@
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -type time_micros() :: non_neg_integer().
 -type ratio() :: float().
 -type state() :: #state{consumers ::priority_queue:q(),
@@ -94,8 +92,6 @@
              state()) -> 'unchanged' | {'unblocked', state()}.
 -spec utilisation(state()) -> ratio().
 
--endif.
-
 %%----------------------------------------------------------------------------
 
 new() -> #state{consumers = priority_queue:new(),
index 981d8e74ff000bb8cf134a7157f2458b7fb3ac3d..6a14854882382fab12ceb9da5c4ccfb453a29bd9 100644 (file)
 -rabbit_upgrade({store_msg_size, local, [avoid_zeroes]}).
 -rabbit_upgrade({store_msg,      local, [store_msg_size]}).
 
--ifdef(use_specs).
-
--type(hdl() :: ('undefined' | any())).
--type(segment() :: ('undefined' |
+-type hdl() :: ('undefined' | any()).
+-type segment() :: ('undefined' |
                     #segment { num                :: non_neg_integer(),
                                path               :: file:filename(),
-                               journal_entries    :: array:array(),
-                               entries_to_segment :: array:array(),
+                               journal_entries    :: ?ARRAY_TYPE(),
+                               entries_to_segment :: ?ARRAY_TYPE(),
                                unacked            :: non_neg_integer()
-                             })).
--type(seq_id() :: integer()).
--type(seg_dict() :: {dict:dict(), [segment()]}).
--type(on_sync_fun() :: fun ((gb_sets:set()) -> ok)).
--type(qistate() :: #qistate { dir                 :: file:filename(),
+                             }).
+-type seq_id() :: integer().
+-type seg_dict() :: {?DICT_TYPE(), [segment()]}.
+-type on_sync_fun() :: fun ((?GB_SET_TYPE()) -> ok).
+-type qistate() :: #qistate { dir                 :: file:filename(),
                               segments            :: 'undefined' | seg_dict(),
                               journal_handle      :: hdl(),
                               dirty_count         :: integer(),
                               max_journal_entries :: non_neg_integer(),
                               on_sync             :: on_sync_fun(),
                               on_sync_msg         :: on_sync_fun(),
-                              unconfirmed         :: gb_sets:set(),
-                              unconfirmed_msg     :: gb_sets:set(),
+                              unconfirmed         :: ?GB_SET_TYPE(),
+                              unconfirmed_msg     :: ?GB_SET_TYPE(),
                               pre_publish_cache   :: list(),
                               delivered_cache     :: list()
-                            }).
--type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())).
--type(walker(A) :: fun ((A) -> 'finished' |
-                               {rabbit_types:msg_id(), non_neg_integer(), A})).
--type(shutdown_terms() :: [term()] | 'non_clean_shutdown').
-
--spec(erase/1 :: (rabbit_amqqueue:name()) -> 'ok').
--spec(reset_state/1 :: (qistate()) -> qistate()).
--spec(init/3 :: (rabbit_amqqueue:name(),
-                 on_sync_fun(), on_sync_fun()) -> qistate()).
--spec(recover/6 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(),
+                            }.
+-type contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean()).
+-type walker(A) :: fun ((A) -> 'finished' |
+                               {rabbit_types:msg_id(), non_neg_integer(), A}).
+-type shutdown_terms() :: [term()] | 'non_clean_shutdown'.
+
+-spec erase(rabbit_amqqueue:name()) -> 'ok'.
+-spec reset_state(qistate()) -> qistate().
+-spec init(rabbit_amqqueue:name(),
+                 on_sync_fun(), on_sync_fun()) -> qistate().
+-spec recover(rabbit_amqqueue:name(), shutdown_terms(), boolean(),
                     contains_predicate(),
                     on_sync_fun(), on_sync_fun()) ->
                         {'undefined' | non_neg_integer(),
-                         'undefined' | non_neg_integer(), qistate()}).
--spec(terminate/2 :: ([any()], qistate()) -> qistate()).
--spec(delete_and_terminate/1 :: (qistate()) -> qistate()).
--spec(publish/6 :: (rabbit_types:msg_id(), seq_id(),
+                         'undefined' | non_neg_integer(), qistate()}.
+-spec terminate([any()], qistate()) -> qistate().
+-spec delete_and_terminate(qistate()) -> qistate().
+-spec publish(rabbit_types:msg_id(), seq_id(),
                     rabbit_types:message_properties(), boolean(),
-                    non_neg_integer(), qistate()) -> qistate()).
--spec(deliver/2 :: ([seq_id()], qistate()) -> qistate()).
--spec(ack/2 :: ([seq_id()], qistate()) -> qistate()).
--spec(sync/1 :: (qistate()) -> qistate()).
--spec(needs_sync/1 :: (qistate()) -> 'confirms' | 'other' | 'false').
--spec(flush/1 :: (qistate()) -> qistate()).
--spec(read/3 :: (seq_id(), seq_id(), qistate()) ->
+                    non_neg_integer(), qistate()) -> qistate().
+-spec deliver([seq_id()], qistate()) -> qistate().
+-spec ack([seq_id()], qistate()) -> qistate().
+-spec sync(qistate()) -> qistate().
+-spec needs_sync(qistate()) -> 'confirms' | 'other' | 'false'.
+-spec flush(qistate()) -> qistate().
+-spec read(seq_id(), seq_id(), qistate()) ->
                      {[{rabbit_types:msg_id(), seq_id(),
                         rabbit_types:message_properties(),
-                        boolean(), boolean()}], qistate()}).
--spec(next_segment_boundary/1 :: (seq_id()) -> seq_id()).
--spec(bounds/1 :: (qistate()) ->
-                       {non_neg_integer(), non_neg_integer(), qistate()}).
--spec(start/1 :: ([rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}).
-
--spec(add_queue_ttl/0 :: () -> 'ok').
+                        boolean(), boolean()}], qistate()}.
+-spec next_segment_boundary(seq_id()) -> seq_id().
+-spec bounds(qistate()) ->
+                       {non_neg_integer(), non_neg_integer(), qistate()}.
+-spec start([rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}.
 
--endif.
+-spec add_queue_ttl() -> 'ok'.
 
 
 %%----------------------------------------------------------------------------
@@ -816,8 +812,9 @@ append_journal_to_segment(#segment { journal_entries = JEntries,
         _ ->
             file_handle_cache_stats:update(queue_index_write),
 
-            {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
-                                               [{write_buffer, infinity}]),
+            {ok, Hdl} = file_handle_cache:open_with_absolute_path(
+                          Path, ?WRITE_MODE,
+                          [{write_buffer, infinity}]),
             %% the file_handle_cache also does a list reverse, so this
             %% might not be required here, but before we were doing a
             %% sparse_foldr, a lists:reverse/1 seems to be the correct
@@ -832,8 +829,8 @@ get_journal_handle(State = #qistate { journal_handle = undefined,
                                       dir = Dir }) ->
     Path = filename:join(Dir, ?JOURNAL_FILENAME),
     ok = rabbit_file:ensure_dir(Path),
-    {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
-                                       [{write_buffer, infinity}]),
+    {ok, Hdl} = file_handle_cache:open_with_absolute_path(
+                  Path, ?WRITE_MODE, [{write_buffer, infinity}]),
     {Hdl, State #qistate { journal_handle = Hdl }};
 get_journal_handle(State = #qistate { journal_handle = Hdl }) ->
     {Hdl, State}.
@@ -1058,7 +1055,8 @@ load_segment(KeepAcked, #segment { path = Path }) ->
         false -> Empty;
         true  -> Size = rabbit_file:file_size(Path),
                  file_handle_cache_stats:update(queue_index_read),
-                 {ok, Hdl} = file_handle_cache:open(Path, ?READ_MODE, []),
+                 {ok, Hdl} = file_handle_cache:open_with_absolute_path(
+                               Path, ?READ_MODE, []),
                  {ok, 0} = file_handle_cache:position(Hdl, bof),
                  {ok, SegBin} = file_handle_cache:read(Hdl, Size),
                  ok = file_handle_cache:close(Hdl),
@@ -1383,10 +1381,11 @@ transform_file(Path, Fun) when is_function(Fun)->
     case rabbit_file:file_size(Path) of
         0    -> ok;
         Size -> {ok, PathTmpHdl} =
-                    file_handle_cache:open(PathTmp, ?WRITE_MODE,
-                                           [{write_buffer, infinity}]),
+                    file_handle_cache:open_with_absolute_path(
+                      PathTmp, ?WRITE_MODE,
+                      [{write_buffer, infinity}]),
 
-                {ok, PathHdl} = file_handle_cache:open(
+                {ok, PathHdl} = file_handle_cache:open_with_absolute_path(
                                   Path, ?READ_MODE, [{read_buffer, Size}]),
                 {ok, Content} = file_handle_cache:read(PathHdl, Size),
                 ok = file_handle_cache:close(PathHdl),
index 44394a962c7960c2577d1e923db083983fac8bfb..c5aad50e6415d7e49b12f7376a4b236d1dffd7b8 100644 (file)
@@ -26,7 +26,9 @@
                     {mfa, {rabbit_registry, register,
                            [policy_validator,
                             <<"queue-master-locator">>,
-                            ?MODULE]}}]}).
+                            ?MODULE]}},
+                   {requires, rabbit_registry},
+                   {enables, recovery}]}).
 
 validate_policy(KeyList) ->
     case proplists:lookup(<<"queue-master-locator">> , KeyList) of
index e47ae67bfb39f25940fe284972179b4c55167872..f6f94eca45b1e36eed213544579734bb7f646b7a 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start() -> rabbit_types:ok_or_error(term())).
--spec(stop() -> rabbit_types:ok_or_error(term())).
--spec(store(file:filename(), term()) -> rabbit_types:ok_or_error(term())).
--spec(read(file:filename()) -> rabbit_types:ok_or_error2(term(), not_found)).
--spec(clear() -> 'ok').
-
--endif. % use_specs
+-spec start() -> rabbit_types:ok_or_error(term()).
+-spec stop() -> rabbit_types:ok_or_error(term()).
+-spec store(file:filename(), term()) -> rabbit_types:ok_or_error(term()).
+-spec read(file:filename()) -> rabbit_types:ok_or_error2(term(), not_found).
+-spec clear() -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index db61c958ec2dd2dae0f65353a36fa989402f0777..0428c3533fe52af6e1439d85857e430c55c8fc7a 100644 (file)
 -define(SERVER, ?MODULE).
 -define(ETS_NAME, ?MODULE).
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(register/3 :: (atom(), binary(), atom()) -> 'ok').
--spec(unregister/2 :: (atom(), binary()) -> 'ok').
--spec(binary_to_type/1 ::
-        (binary()) -> atom() | rabbit_types:error('not_found')).
--spec(lookup_module/2 ::
-        (atom(), atom()) -> rabbit_types:ok_or_error2(atom(), 'not_found')).
--spec(lookup_all/1 :: (atom()) -> [{atom(), atom()}]).
-
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec register(atom(), binary(), atom()) -> 'ok'.
+-spec unregister(atom(), binary()) -> 'ok'.
+-spec binary_to_type
+        (binary()) -> atom() | rabbit_types:error('not_found').
+-spec lookup_module
+        (atom(), atom()) -> rabbit_types:ok_or_error2(atom(), 'not_found').
+-spec lookup_all(atom()) -> [{atom(), atom()}].
 
 %%---------------------------------------------------------------------------
 
index 80db6e87e11826bcc797799afe7fcf6c482c96a5..56faefe536ae967a60376af5bf3902ae5f9d456e 100644 (file)
 
 -export([parse_information_unit/1]).
 
--ifdef(use_spec).
-
--spec(parse_information_unit/1 :: (integer() | string()) ->
-                                       {ok, integer()} | {error, parse_error}).
-
--endif.
+-spec parse_information_unit(integer() | string()) ->
+          {ok, integer()} | {error, parse_error}.
 
 parse_information_unit(Value) when is_integer(Value) -> {ok, Value};
 parse_information_unit(Value) when is_list(Value) ->
index ed35556f30e58b59d7e832fc631c925fadf07aca..196025918827075c5569c5c8407161f4f642a49f 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/3 :: (atom(), rabbit_types:mfargs(), boolean()) ->
-                           rabbit_types:ok_pid_or_error()).
-
--endif.
+-spec start_link(atom(), rabbit_types:mfargs(), boolean()) ->
+                           rabbit_types:ok_pid_or_error().
 
 %%----------------------------------------------------------------------------
 
@@ -45,4 +41,4 @@ init([{Mod, _F, _A} = Fun, Delay]) ->
           [{Mod, Fun, case Delay of
                           true  -> {transient, 1};
                           false -> transient
-                      end, ?MAX_WAIT, worker, [Mod]}]}}.
+                      end, ?WORKER_WAIT, worker, [Mod]}]}}.
index 42b67d6681c7dbdac9cb57aefb9658ad6226acfa..d4390ac4d85a56ba86c881eeb467e940745d7393 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([routing_key/0, match_result/0]).
 
--type(routing_key() :: binary()).
--type(match_result() :: [rabbit_types:binding_destination()]).
+-type routing_key() :: binary().
+-type match_result() :: [rabbit_types:binding_destination()].
 
--spec(match_bindings/2 :: (rabbit_types:binding_source(),
+-spec match_bindings(rabbit_types:binding_source(),
                            fun ((rabbit_types:binding()) -> boolean())) ->
-    match_result()).
--spec(match_routing_key/2 :: (rabbit_types:binding_source(),
+    match_result().
+-spec match_routing_key(rabbit_types:binding_source(),
                              [routing_key()] | ['_']) ->
-    match_result()).
-
--endif.
+    match_result().
 
 %%----------------------------------------------------------------------------
 
index ba1a830df1b777aaa2db9f757a3c2665956bdac1..97f78da8ba5c3197bcdbcfc7c0f38b3152d095fb 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--type(ok_or_error_string() :: 'ok' | {'error_string', string()}).
--type(ok_thunk_or_error_string() :: ok_or_error_string() | fun(() -> 'ok')).
-
--spec(parse_set/5 :: (rabbit_types:vhost(), binary(), binary(), string(),
-                      rabbit_types:user() | 'none') -> ok_or_error_string()).
--spec(set/5 :: (rabbit_types:vhost(), binary(), binary(), term(),
-                rabbit_types:user() | 'none') -> ok_or_error_string()).
--spec(set_any/5 :: (rabbit_types:vhost(), binary(), binary(), term(),
-                    rabbit_types:user() | 'none') -> ok_or_error_string()).
--spec(set_global/2 :: (atom(), term()) -> 'ok').
--spec(clear/3 :: (rabbit_types:vhost(), binary(), binary())
-                 -> ok_thunk_or_error_string()).
--spec(clear_any/3 :: (rabbit_types:vhost(), binary(), binary())
-                     -> ok_thunk_or_error_string()).
--spec(list/0 :: () -> [rabbit_types:infos()]).
--spec(list/1 :: (rabbit_types:vhost() | '_') -> [rabbit_types:infos()]).
--spec(list_component/1 :: (binary()) -> [rabbit_types:infos()]).
--spec(list/2 :: (rabbit_types:vhost() | '_', binary() | '_')
-                -> [rabbit_types:infos()]).
--spec(list_formatted/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(list_formatted/3 :: (rabbit_types:vhost(), reference(), pid()) -> 'ok').
--spec(lookup/3 :: (rabbit_types:vhost(), binary(), binary())
-                  -> rabbit_types:infos() | 'not_found').
--spec(value/3 :: (rabbit_types:vhost(), binary(), binary()) -> term()).
--spec(value/4 :: (rabbit_types:vhost(), binary(), binary(), term()) -> term()).
--spec(value_global/1 :: (atom()) -> term() | 'not_found').
--spec(value_global/2 :: (atom(), term()) -> term()).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
-
--endif.
+-type ok_or_error_string() :: 'ok' | {'error_string', string()}.
+-type ok_thunk_or_error_string() :: ok_or_error_string() | fun(() -> 'ok').
+
+-spec parse_set(rabbit_types:vhost(), binary(), binary(), string(),
+                      rabbit_types:user() | 'none') -> ok_or_error_string().
+-spec set(rabbit_types:vhost(), binary(), binary(), term(),
+                rabbit_types:user() | 'none') -> ok_or_error_string().
+-spec set_any(rabbit_types:vhost(), binary(), binary(), term(),
+                    rabbit_types:user() | 'none') -> ok_or_error_string().
+-spec set_global(atom(), term()) -> 'ok'.
+-spec clear(rabbit_types:vhost(), binary(), binary())
+                 -> ok_thunk_or_error_string().
+-spec clear_any(rabbit_types:vhost(), binary(), binary())
+                     -> ok_thunk_or_error_string().
+-spec list() -> [rabbit_types:infos()].
+-spec list(rabbit_types:vhost() | '_') -> [rabbit_types:infos()].
+-spec list_component(binary()) -> [rabbit_types:infos()].
+-spec list(rabbit_types:vhost() | '_', binary() | '_')
+                -> [rabbit_types:infos()].
+-spec list_formatted(rabbit_types:vhost()) -> [rabbit_types:infos()].
+-spec list_formatted(rabbit_types:vhost(), reference(), pid()) -> 'ok'.
+-spec lookup(rabbit_types:vhost(), binary(), binary())
+                  -> rabbit_types:infos() | 'not_found'.
+-spec value(rabbit_types:vhost(), binary(), binary()) -> term().
+-spec value(rabbit_types:vhost(), binary(), binary(), term()) -> term().
+-spec value_global(atom()) -> term() | 'not_found'.
+-spec value_global(atom(), term()) -> term().
+-spec info_keys() -> rabbit_types:info_keys().
 
 %%---------------------------------------------------------------------------
 
index 38769e1835e1c371829383918b1969949e32f98e..ac9fb204d0b194234956973499e4bcd3995e8d83 100644 (file)
 
 %%--------------------------------------------------------------------------
 
--ifdef(use_specs).
-
 -export_type([certificate/0]).
 
--type(certificate() :: binary()).
-
--spec(peer_cert_issuer/1        :: (certificate()) -> string()).
--spec(peer_cert_subject/1       :: (certificate()) -> string()).
--spec(peer_cert_validity/1      :: (certificate()) -> string()).
--spec(peer_cert_subject_items/2  ::
-        (certificate(), tuple()) -> [string()] | 'not_found').
--spec(peer_cert_auth_name/1 ::
-        (certificate()) -> binary() | 'not_found' | 'unsafe').
+-type certificate() :: binary().
 
--endif.
+-spec peer_cert_issuer(certificate()) -> string().
+-spec peer_cert_subject(certificate()) -> string().
+-spec peer_cert_validity(certificate()) -> string().
+-spec peer_cert_subject_items
+        (certificate(), tuple()) -> [string()] | 'not_found'.
+-spec peer_cert_auth_name
+        (certificate()) -> binary() | 'not_found' | 'unsafe'.
 
 %%--------------------------------------------------------------------------
 %% High-level functions used by reader
index 501623d96b84af8a5ed9380b54c413df97d703c7..ad70540e5b26c571ef4387ee821077ec5f274047 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_child/1 :: (atom()) -> 'ok').
--spec(start_child/2 :: (atom(), [any()]) -> 'ok').
--spec(start_child/3 :: (atom(), atom(), [any()]) -> 'ok').
--spec(start_supervisor_child/1 :: (atom()) -> 'ok').
--spec(start_supervisor_child/2 :: (atom(), [any()]) -> 'ok').
--spec(start_supervisor_child/3 :: (atom(), atom(), [any()]) -> 'ok').
--spec(start_restartable_child/1 :: (atom()) -> 'ok').
--spec(start_restartable_child/2 :: (atom(), [any()]) -> 'ok').
--spec(start_delayed_restartable_child/1 :: (atom()) -> 'ok').
--spec(start_delayed_restartable_child/2 :: (atom(), [any()]) -> 'ok').
--spec(stop_child/1 :: (atom()) -> rabbit_types:ok_or_error(any())).
-
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec start_child(atom()) -> 'ok'.
+-spec start_child(atom(), [any()]) -> 'ok'.
+-spec start_child(atom(), atom(), [any()]) -> 'ok'.
+-spec start_supervisor_child(atom()) -> 'ok'.
+-spec start_supervisor_child(atom(), [any()]) -> 'ok'.
+-spec start_supervisor_child(atom(), atom(), [any()]) -> 'ok'.
+-spec start_restartable_child(atom()) -> 'ok'.
+-spec start_restartable_child(atom(), [any()]) -> 'ok'.
+-spec start_delayed_restartable_child(atom()) -> 'ok'.
+-spec start_delayed_restartable_child(atom(), [any()]) -> 'ok'.
+-spec stop_child(atom()) -> rabbit_types:ok_or_error(any()).
 
 %%----------------------------------------------------------------------------
 
@@ -62,7 +58,7 @@ start_child(ChildId, Mod, Args) ->
     child_reply(supervisor:start_child(
                   ?SERVER,
                   {ChildId, {Mod, start_link, Args},
-                   transient, ?MAX_WAIT, worker, [Mod]})).
+                   transient, ?WORKER_WAIT, worker, [Mod]})).
 
 start_supervisor_child(Mod) -> start_supervisor_child(Mod, []).
 
index aed49bbe74373f74bc042c9864331c887dfc1439..390909696499502b4aeb2f4f205d34890379f496 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(create/0 :: () -> 'ok').
--spec(create_local_copy/1 :: ('disc' | 'ram') -> 'ok').
--spec(wait_for_replicated/0 :: () -> 'ok').
--spec(wait/1 :: ([atom()]) -> 'ok').
--spec(wait_timeout/0 :: () -> non_neg_integer() | infinity).
--spec(force_load/0 :: () -> 'ok').
--spec(is_present/0 :: () -> boolean()).
--spec(is_empty/0 :: () -> boolean()).
--spec(needs_default_data/0 :: () -> boolean()).
--spec(check_schema_integrity/0 :: () -> rabbit_types:ok_or_error(any())).
--spec(clear_ram_only_tables/0 :: () -> 'ok').
-
--endif.
+-spec create() -> 'ok'.
+-spec create_local_copy('disc' | 'ram') -> 'ok'.
+-spec wait_for_replicated() -> 'ok'.
+-spec wait([atom()]) -> 'ok'.
+-spec wait_timeout() -> non_neg_integer() | infinity.
+-spec force_load() -> 'ok'.
+-spec is_present() -> boolean().
+-spec is_empty() -> boolean().
+-spec needs_default_data() -> boolean().
+-spec check_schema_integrity() -> rabbit_types:ok_or_error(any()).
+-spec clear_ram_only_tables() -> 'ok'.
 
 %%----------------------------------------------------------------------------
 %% Main interface
index cd4ecf9c382522ccc27642a712ddb54468c88a4f..4bfd94e1e0022c3fd8cbf11bdc49994ae16c50cd 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
+-type state() :: rabbit_types:exchange() | 'none'.
 
--type(state() :: rabbit_types:exchange() | 'none').
-
--spec(init/1 :: (rabbit_types:vhost()) -> state()).
--spec(enabled/1 :: (rabbit_types:vhost()) -> boolean()).
--spec(tap_in/6 :: (rabbit_types:basic_message(), [rabbit_amqqueue:name()],
+-spec init(rabbit_types:vhost()) -> state().
+-spec enabled(rabbit_types:vhost()) -> boolean().
+-spec tap_in(rabbit_types:basic_message(), [rabbit_amqqueue:name()],
                    binary(), rabbit_channel:channel_number(),
-                   rabbit_types:username(), state()) -> 'ok').
--spec(tap_out/5 :: (rabbit_amqqueue:qmsg(), binary(),
+                   rabbit_types:username(), state()) -> 'ok'.
+-spec tap_out(rabbit_amqqueue:qmsg(), binary(),
                     rabbit_channel:channel_number(),
-                    rabbit_types:username(), state()) -> 'ok').
-
--spec(start/1 :: (rabbit_types:vhost()) -> 'ok').
--spec(stop/1 :: (rabbit_types:vhost()) -> 'ok').
+                    rabbit_types:username(), state()) -> 'ok'.
 
--endif.
+-spec start(rabbit_types:vhost()) -> 'ok'.
+-spec stop(rabbit_types:vhost()) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
index 2f59554d3cb6ddc9623fbb63d18516ee9c6cae59..f88b7cc73fcb46d5d0e546afb8bf6a5e7123feaf 100644 (file)
 
 %% -------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(maybe_upgrade_mnesia/0 :: () -> 'ok').
--spec(maybe_upgrade_local/0 :: () -> 'ok' |
-                                     'version_not_available' |
-                                     'starting_from_scratch').
-
--endif.
+-spec maybe_upgrade_mnesia() -> 'ok'.
+-spec maybe_upgrade_local() ->
+          'ok' |
+          'version_not_available' |
+          'starting_from_scratch'.
 
 %% -------------------------------------------------------------------
 
index f9ed62b4b261a2b35a95e6697335f528ac5d91ca..67c2a84a0ea118eb60f8066009adaca7afa9dade 100644 (file)
 
 %% -------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(remove_user_scope/0     :: () -> 'ok').
--spec(hash_passwords/0        :: () -> 'ok').
--spec(add_ip_to_listener/0    :: () -> 'ok').
--spec(internal_exchanges/0    :: () -> 'ok').
--spec(user_to_internal_user/0 :: () -> 'ok').
--spec(topic_trie/0            :: () -> 'ok').
--spec(semi_durable_route/0    :: () -> 'ok').
--spec(exchange_event_serial/0 :: () -> 'ok').
--spec(trace_exchanges/0       :: () -> 'ok').
--spec(user_admin_to_tags/0    :: () -> 'ok').
--spec(ha_mirrors/0            :: () -> 'ok').
--spec(gm/0                    :: () -> 'ok').
--spec(exchange_scratch/0      :: () -> 'ok').
--spec(mirrored_supervisor/0   :: () -> 'ok').
--spec(topic_trie_node/0       :: () -> 'ok').
--spec(runtime_parameters/0    :: () -> 'ok').
--spec(policy/0                :: () -> 'ok').
--spec(sync_slave_pids/0       :: () -> 'ok').
--spec(no_mirror_nodes/0       :: () -> 'ok').
--spec(gm_pids/0               :: () -> 'ok').
--spec(exchange_decorators/0   :: () -> 'ok').
--spec(policy_apply_to/0       :: () -> 'ok').
--spec(queue_decorators/0      :: () -> 'ok').
--spec(internal_system_x/0     :: () -> 'ok').
--spec(cluster_name/0          :: () -> 'ok').
--spec(down_slave_nodes/0      :: () -> 'ok').
--spec(queue_state/0           :: () -> 'ok').
--spec(recoverable_slaves/0    :: () -> 'ok').
--spec(user_password_hashing/0 :: () -> 'ok').
-
--endif.
+-spec remove_user_scope() -> 'ok'.
+-spec hash_passwords() -> 'ok'.
+-spec add_ip_to_listener() -> 'ok'.
+-spec internal_exchanges() -> 'ok'.
+-spec user_to_internal_user() -> 'ok'.
+-spec topic_trie() -> 'ok'.
+-spec semi_durable_route() -> 'ok'.
+-spec exchange_event_serial() -> 'ok'.
+-spec trace_exchanges() -> 'ok'.
+-spec user_admin_to_tags() -> 'ok'.
+-spec ha_mirrors() -> 'ok'.
+-spec gm() -> 'ok'.
+-spec exchange_scratch() -> 'ok'.
+-spec mirrored_supervisor() -> 'ok'.
+-spec topic_trie_node() -> 'ok'.
+-spec runtime_parameters() -> 'ok'.
+-spec policy() -> 'ok'.
+-spec sync_slave_pids() -> 'ok'.
+-spec no_mirror_nodes() -> 'ok'.
+-spec gm_pids() -> 'ok'.
+-spec exchange_decorators() -> 'ok'.
+-spec policy_apply_to() -> 'ok'.
+-spec queue_decorators() -> 'ok'.
+-spec internal_system_x() -> 'ok'.
+-spec cluster_name() -> 'ok'.
+-spec down_slave_nodes() -> 'ok'.
+-spec queue_state() -> 'ok'.
+-spec recoverable_slaves() -> 'ok'.
+-spec user_password_hashing() -> 'ok'.
 
 %%--------------------------------------------------------------------
 
index d5b090bed472f1108d7114eb81ffca7b6bd276cc..9ad752a174b4aa227d61f3db9f527e84084bfc37 100644 (file)
 
 -rabbit_upgrade({multiple_routing_keys, local, []}).
 
--ifdef(use_specs).
+-type seq_id()  :: non_neg_integer().
 
--type(seq_id()  :: non_neg_integer()).
-
--type(rates() :: #rates { in        :: float(),
+-type rates() :: #rates { in        :: float(),
                           out       :: float(),
                           ack_in    :: float(),
                           ack_out   :: float(),
-                          timestamp :: rabbit_types:timestamp()}).
+                          timestamp :: rabbit_types:timestamp()}.
 
--type(delta() :: #delta { start_seq_id :: non_neg_integer(),
+-type delta() :: #delta { start_seq_id :: non_neg_integer(),
                           count        :: non_neg_integer(),
-                          end_seq_id   :: non_neg_integer() }).
+                          end_seq_id   :: non_neg_integer() }.
 
 %% The compiler (rightfully) complains that ack() and state() are
 %% unused. For this reason we duplicate a -spec from
 %% warnings. The problem here is that we can't parameterise the BQ
 %% behaviour by these two types as we would like to. We still leave
 %% these here for documentation purposes.
--type(ack() :: seq_id()).
--type(state() :: #vqstate {
+-type ack() :: seq_id().
+-type state() :: #vqstate {
              q1                    :: ?QUEUE:?QUEUE(),
              q2                    :: ?QUEUE:?QUEUE(),
              delta                 :: delta(),
              out_counter           :: non_neg_integer(),
              in_counter            :: non_neg_integer(),
              rates                 :: rates(),
-             msgs_on_disk          :: gb_sets:set(),
-             msg_indices_on_disk   :: gb_sets:set(),
-             unconfirmed           :: gb_sets:set(),
-             confirmed             :: gb_sets:set(),
+             msgs_on_disk          :: ?GB_SET_TYPE(),
+             msg_indices_on_disk   :: ?GB_SET_TYPE(),
+             unconfirmed           :: ?GB_SET_TYPE(),
+             confirmed             :: ?GB_SET_TYPE(),
              ack_out_counter       :: non_neg_integer(),
              ack_in_counter        :: non_neg_integer(),
              disk_read_count       :: non_neg_integer(),
              disk_write_count      :: non_neg_integer(),
 
              io_batch_size         :: pos_integer(),
-             mode                  :: 'default' | 'lazy' }).
+             mode                  :: 'default' | 'lazy' }.
 %% Duplicated from rabbit_backing_queue
--spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}).
-
--spec(multiple_routing_keys/0 :: () -> 'ok').
+-spec ack([ack()], state()) -> {[rabbit_guid:guid()], state()}.
 
--endif.
+-spec multiple_routing_keys() -> 'ok'.
 
 -define(BLANK_DELTA, #delta { start_seq_id = undefined,
                               count        = 0,
@@ -556,7 +552,7 @@ delete_crashed(#amqqueue{name = QName}) ->
     ok = rabbit_queue_index:erase(QName).
 
 purge(State = #vqstate { len = Len }) ->
-    case is_pending_ack_empty(State) of
+    case is_pending_ack_empty(State) and is_unconfirmed_empty(State) of
         true ->
             {Len, purge_and_index_reset(State)};
         false ->
@@ -688,12 +684,12 @@ requeue(AckTags, #vqstate { mode       = default,
                                                   State2),
     MsgCount = length(MsgIds2),
     {MsgIds2, a(reduce_memory_use(
-                  maybe_update_rates(
+                  maybe_update_rates(ui(
                     State3 #vqstate { delta      = Delta1,
                                       q3         = Q3a,
                                       q4         = Q4a,
                                       in_counter = InCounter + MsgCount,
-                                      len        = Len + MsgCount })))};
+                                      len        = Len + MsgCount }))))};
 requeue(AckTags, #vqstate { mode       = lazy,
                             delta      = Delta,
                             q3         = Q3,
@@ -706,11 +702,11 @@ requeue(AckTags, #vqstate { mode       = lazy,
                                                 State1),
     MsgCount = length(MsgIds1),
     {MsgIds1, a(reduce_memory_use(
-                  maybe_update_rates(
+                  maybe_update_rates(ui(
                     State2 #vqstate { delta      = Delta1,
                                       q3         = Q3a,
                                       in_counter = InCounter + MsgCount,
-                                      len        = Len + MsgCount })))}.
+                                      len        = Len + MsgCount }))))}.
 
 ackfold(MsgFun, Acc, State, AckTags) ->
     {AccN, StateN} =
@@ -1648,6 +1644,9 @@ reset_qi_state(State = #vqstate{index_state = IndexState}) ->
 is_pending_ack_empty(State) ->
     count_pending_acks(State) =:= 0.
 
+is_unconfirmed_empty(#vqstate { unconfirmed = UC }) ->
+    gb_sets:is_empty(UC).
+
 count_pending_acks(#vqstate { ram_pending_ack   = RPA,
                               disk_pending_ack  = DPA,
                               qi_pending_ack    = QPA }) ->
@@ -2124,7 +2123,7 @@ publish_alpha(MsgStatus, State) ->
     {MsgStatus, stats({1, -1}, {MsgStatus, MsgStatus}, State)}.
 
 publish_beta(MsgStatus, State) ->
-    {MsgStatus1, State1} = maybe_write_to_disk(true, false, MsgStatus, State),
+    {MsgStatus1, State1} = maybe_prepare_write_to_disk(true, false, MsgStatus, State),
     MsgStatus2 = m(trim_msg_status(MsgStatus1)),
     {MsgStatus2, stats({1, -1}, {MsgStatus, MsgStatus2}, State1)}.
 
@@ -2161,7 +2160,7 @@ delta_merge(SeqIds, Delta, MsgIds, State) ->
                         {#msg_status { msg_id = MsgId } = MsgStatus, State1} =
                             msg_from_pending_ack(SeqId, State0),
                         {_MsgStatus, State2} =
-                            maybe_write_to_disk(true, true, MsgStatus, State1),
+                            maybe_prepare_write_to_disk(true, true, MsgStatus, State1),
                         {expand_delta(SeqId, Delta0), [MsgId | MsgIds0],
                          stats({1, -1}, {MsgStatus, none}, State2)}
                 end, {Delta, MsgIds, State}, SeqIds).
index 8167c8622ad15111d431b7e705758b0f4a4c913d..a27f0aca00525efcb7228f3e15fddfd0f3ff808c 100644 (file)
          version_error/3]).
 
 %% -------------------------------------------------------------------
--ifdef(use_specs).
 
 -export_type([scope/0, step/0]).
 
--type(scope() :: atom()).
--type(scope_version() :: [atom()]).
--type(step() :: {atom(), atom()}).
-
--type(version() :: [atom()]).
-
--spec(recorded/0 :: () -> rabbit_types:ok_or_error2(version(), any())).
--spec(matches/2 :: ([A], [A]) -> boolean()).
--spec(desired/0 :: () -> version()).
--spec(desired_for_scope/1 :: (scope()) -> scope_version()).
--spec(record_desired/0 :: () -> 'ok').
--spec(record_desired_for_scope/1 ::
-        (scope()) -> rabbit_types:ok_or_error(any())).
--spec(upgrades_required/1 ::
-        (scope()) -> rabbit_types:ok_or_error2([step()], any())).
--spec(check_version_consistency/3 ::
-        (string(), string(), string()) -> rabbit_types:ok_or_error(any())).
--spec(check_version_consistency/4 ::
+-type scope() :: atom().
+-type scope_version() :: [atom()].
+-type step() :: {atom(), atom()}.
+
+-type version() :: [atom()].
+
+-spec recorded() -> rabbit_types:ok_or_error2(version(), any()).
+-spec matches([A], [A]) -> boolean().
+-spec desired() -> version().
+-spec desired_for_scope(scope()) -> scope_version().
+-spec record_desired() -> 'ok'.
+-spec record_desired_for_scope
+        (scope()) -> rabbit_types:ok_or_error(any()).
+-spec upgrades_required
+        (scope()) -> rabbit_types:ok_or_error2([step()], any()).
+-spec check_version_consistency
+        (string(), string(), string()) -> rabbit_types:ok_or_error(any()).
+-spec check_version_consistency
         (string(), string(), string(), string()) ->
-                                          rabbit_types:ok_or_error(any())).
--spec(check_otp_consistency/1 ::
-        (string()) -> rabbit_types:ok_or_error(any())).
--endif.
+                                          rabbit_types:ok_or_error(any()).
+-spec check_otp_consistency
+        (string()) -> rabbit_types:ok_or_error(any()).
+
 %% -------------------------------------------------------------------
 
 -define(VERSION_FILENAME, "schema_version").
index f362ef930f1a64080395f9640a59f5bf820c4902..df2f8423b48a298c6049629f1be727dbdb098b40 100644 (file)
 -export([add/1, delete/1, exists/1, list/0, with/2, assert/1]).
 -export([info/1, info/2, info_all/0, info_all/1, info_all/2, info_all/3]).
 
--ifdef(use_specs).
-
--spec(add/1 :: (rabbit_types:vhost()) -> 'ok').
--spec(delete/1 :: (rabbit_types:vhost()) -> 'ok').
--spec(exists/1 :: (rabbit_types:vhost()) -> boolean()).
--spec(list/0 :: () -> [rabbit_types:vhost()]).
--spec(with/2 :: (rabbit_types:vhost(), rabbit_misc:thunk(A)) -> A).
--spec(assert/1 :: (rabbit_types:vhost()) -> 'ok').
-
--spec(info/1 :: (rabbit_types:vhost()) -> rabbit_types:infos()).
--spec(info/2 :: (rabbit_types:vhost(), rabbit_types:info_keys())
-                -> rabbit_types:infos()).
--spec(info_all/0 :: () -> [rabbit_types:infos()]).
--spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
--spec(info_all/3 :: (rabbit_types:info_keys(), reference(), pid()) ->
-                         'ok').
-
--endif.
+-spec add(rabbit_types:vhost()) -> 'ok'.
+-spec delete(rabbit_types:vhost()) -> 'ok'.
+-spec exists(rabbit_types:vhost()) -> boolean().
+-spec list() -> [rabbit_types:vhost()].
+-spec with(rabbit_types:vhost(), rabbit_misc:thunk(A)) -> A.
+-spec assert(rabbit_types:vhost()) -> 'ok'.
+
+-spec info(rabbit_types:vhost()) -> rabbit_types:infos().
+-spec info(rabbit_types:vhost(), rabbit_types:info_keys())
+                -> rabbit_types:infos().
+-spec info_all() -> [rabbit_types:infos()].
+-spec info_all(rabbit_types:info_keys()) -> [rabbit_types:infos()].
+-spec info_all(rabbit_types:info_keys(), reference(), pid()) ->
+                         'ok'.
 
 %%----------------------------------------------------------------------------
 
index d5f7328fec770c2547dd6737f175776b1bb42a23..9c8732bb6b62bdfdd45baa2834598bc2930e2b97 100644 (file)
 
 -module(rabbit_vm).
 
--export([memory/0, binary/0]).
+-export([memory/0, binary/0, ets_tables_memory/1]).
 
 -define(MAGIC_PLUGINS, ["mochiweb", "webmachine", "cowboy", "sockjs",
                         "rfc4627_jsonrpc"]).
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(memory/0 :: () -> rabbit_types:infos()).
--spec(binary/0 :: () -> rabbit_types:infos()).
-
--endif.
+-spec memory() -> rabbit_types:infos().
+-spec binary() -> rabbit_types:infos().
+-spec ets_tables_memory(Owners) -> rabbit_types:infos()
+     when Owners :: all | OwnerProcessName | [OwnerProcessName],
+          OwnerProcessName :: atom().
 
 %%----------------------------------------------------------------------------
 
@@ -45,7 +44,7 @@ memory() ->
 
     Mnesia       = mnesia_memory(),
     MsgIndexETS  = ets_memory([msg_store_persistent, msg_store_transient]),
-    MgmtDbETS    = ets_memory([rabbit_mgmt_db]),
+    MgmtDbETS    = ets_memory([rabbit_mgmt_event_collector]),
 
     [{total,     Total},
      {processes, Processes},
@@ -118,10 +117,19 @@ mnesia_memory() ->
     end.
 
 ets_memory(OwnerNames) ->
+    lists:sum([V || {_K, V} <- ets_tables_memory(OwnerNames)]).
+
+ets_tables_memory(all) ->
+    [{ets:info(T, name), bytes(ets:info(T, memory))}
+     || T <- ets:all(),
+        is_atom(T)];
+ets_tables_memory(OwnerName) when is_atom(OwnerName) ->
+    ets_tables_memory([OwnerName]);
+ets_tables_memory(OwnerNames) when is_list(OwnerNames) ->
     Owners = [whereis(N) || N <- OwnerNames],
-    lists:sum([bytes(ets:info(T, memory)) || T <- ets:all(),
-                                             O <- [ets:info(T, owner)],
-                                             lists:member(O, Owners)]).
+    [{ets:info(T, name), bytes(ets:info(T, memory))}
+     || T <- ets:all(),
+        lists:member(ets:info(T, owner), Owners)].
 
 bytes(Words) ->  try
                      Words * erlang:system_info(wordsize)
@@ -218,21 +226,19 @@ conn_type(PDict) ->
 
 %% NB: this code is non-rabbit specific.
 
--ifdef(use_specs).
--type(process() :: pid() | atom()).
--type(info_key() :: atom()).
--type(info_value() :: any()).
--type(info_item() :: {info_key(), info_value()}).
--type(accumulate() :: fun ((info_key(), info_value(), info_value()) ->
-                                  info_value())).
--type(distinguisher() :: fun (([{term(), term()}]) -> atom())).
--type(distinguishers() :: [{info_key(), distinguisher()}]).
--spec(sum_processes/3 :: ([process()], distinguishers(), [info_key()]) ->
-                              {[{process(), [info_item()]}], [info_item()]}).
--spec(sum_processes/4 :: ([process()], accumulate(), distinguishers(),
+-type process() :: pid() | atom().
+-type info_key() :: atom().
+-type info_value() :: any().
+-type info_item() :: {info_key(), info_value()}.
+-type accumulate() :: fun ((info_key(), info_value(), info_value()) ->
+                                  info_value()).
+-type distinguisher() :: fun (([{term(), term()}]) -> atom()).
+-type distinguishers() :: [{info_key(), distinguisher()}].
+-spec sum_processes([process()], distinguishers(), [info_key()]) ->
+                              {[{process(), [info_item()]}], [info_item()]}.
+-spec sum_processes([process()], accumulate(), distinguishers(),
                           [info_item()]) ->
-                              {[{process(), [info_item()]}], [info_item()]}).
--endif.
+                              {[{process(), [info_item()]}], [info_item()]}.
 
 sum_processes(Names, Distinguishers, Items) ->
     sum_processes(Names, fun (_, X, Y) -> X + Y end, Distinguishers,
index 981956a071096d7af298c60cea25451af723309f..5b0f56dc26747551f1bfb91b9824ba91fdd6bc58 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/3 :: (atom(), rabbit_types:mfargs(), rabbit_types:mfargs()) ->
-                           rabbit_types:ok_pid_or_error()).
-
--endif.
+-spec start_link(atom(), rabbit_types:mfargs(), rabbit_types:mfargs()) ->
+          rabbit_types:ok_pid_or_error().
 
 %%----------------------------------------------------------------------------
 
index 36b04a4593178c860549d07f183b0bdeab3ad2d8..5f15592455b859de21a9b62ef4a9352b2fdc1fce 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
+-type mfargs() :: {atom(), atom(), [any()]}.
 
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--spec(start_link/5 ::
+-spec start_link
         (inet:ip_address(), inet:port_number(),
          mfargs(), mfargs(), string()) ->
-                           rabbit_types:ok_pid_or_error()).
-
--endif.
+                           rabbit_types:ok_pid_or_error().
 
 %%--------------------------------------------------------------------
 
index 98a7c890d92c103682ee2cb558d771ca314be159..5ef652ad60c1a9b4d2a038f7f6c545d894777e3f 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
+-type mfargs() :: {atom(), atom(), [any()]}.
 
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--spec(start_link/10 ::
+-spec start_link
         (inet:ip_address(), inet:port_number(), module(), [gen_tcp:listen_option()],
          module(), any(), mfargs(), mfargs(), integer(), string()) ->
-                           rabbit_types:ok_pid_or_error()).
-
--endif.
+                           rabbit_types:ok_pid_or_error().
 
 %%----------------------------------------------------------------------------
 
index 1c9b08ed2746219b8821e0fc53068b9f4c32eac3..a1586b0cb0152a255b264a01b4071a1a11c001ad 100644 (file)
 -record(params, {content, struct, content_dec, struct_dec}).
 
 -export([log_event/2, term/2]).
-%% exported for testing
--export([test/0]).
+
+-ifdef(TEST).
+-export([term_size/3]).
+-endif.
 
 log_event({Type, GL, {Pid, Format, Args}}, Params)
   when Type =:= error orelse
@@ -123,72 +125,3 @@ tuple_term_size(_T, M, I, S, _W) when I > S ->
     M;
 tuple_term_size(T, M, I, S, W) ->
     tuple_term_size(T, lim(term_size(element(I, T), M, W), 2 * W), I + 1, S, W).
-
-%%----------------------------------------------------------------------------
-
-test() ->
-    test_short_examples_exactly(),
-    test_term_limit(),
-    test_large_examples_for_size(),
-    ok.
-
-test_short_examples_exactly() ->
-    F = fun (Term, Exp) ->
-                Exp = term(Term, {1, {10, 10, 5, 5}}),
-                Term = term(Term, {100000, {10, 10, 5, 5}})
-        end,
-    FSmall = fun (Term, Exp) ->
-                     Exp = term(Term, {1, {2, 2, 2, 2}}),
-                     Term = term(Term, {100000, {2, 2, 2, 2}})
-             end,
-    F([], []),
-    F("h", "h"),
-    F("hello world", "hello w..."),
-    F([[h,e,l,l,o,' ',w,o,r,l,d]], [[h,e,l,l,o,'...']]),
-    F([a|b], [a|b]),
-    F(<<"hello">>, <<"hello">>),
-    F([<<"hello world">>], [<<"he...">>]),
-    F(<<1:1>>, <<1:1>>),
-    F(<<1:81>>, <<0:56, "...">>),
-    F({{{{a}}},{b},c,d,e,f,g,h,i,j,k}, {{{'...'}},{b},c,d,e,f,g,h,i,j,'...'}),
-    FSmall({a,30,40,40,40,40}, {a,30,'...'}),
-    FSmall([a,30,40,40,40,40], [a,30,'...']),
-    P = spawn(fun() -> receive die -> ok end end),
-    F([0, 0.0, <<1:1>>, F, P], [0, 0.0, <<1:1>>, F, P]),
-    P ! die,
-    R = make_ref(),
-    F([R], [R]),
-    ok.
-
-test_term_limit() ->
-    W = erlang:system_info(wordsize),
-    S = <<"abc">>,
-    1 = term_size(S, 4, W),
-    limit_exceeded = term_size(S, 3, W),
-    case 100 - term_size([S, S], 100, W) of
-        22 -> ok; %% 32 bit
-        38 -> ok  %% 64 bit
-    end,
-    case 100 - term_size([S, [S]], 100, W) of
-        30 -> ok; %% ditto
-        54 -> ok
-    end,
-    limit_exceeded = term_size([S, S], 6, W),
-    ok.
-
-test_large_examples_for_size() ->
-    %% Real world values
-    Shrink = fun(Term) -> term(Term, {1, {1000, 100, 50, 5}}) end,
-    TestSize = fun(Term) ->
-                       true = 5000000 < size(term_to_binary(Term)),
-                       true = 500000 > size(term_to_binary(Shrink(Term)))
-               end,
-    TestSize(lists:seq(1, 5000000)),
-    TestSize(recursive_list(1000, 10)),
-    TestSize(recursive_list(5000, 20)),
-    TestSize(gb_sets:from_list([I || I <- lists:seq(1, 1000000)])),
-    TestSize(gb_trees:from_orddict([{I, I} || I <- lists:seq(1, 1000000)])),
-    ok.
-
-recursive_list(S, 0) -> lists:seq(1, S);
-recursive_list(S, N) -> [recursive_list(S div N, N-1) || _ <- lists:seq(1, S)].
index 68926962b045aac1a9e83b720b14fe90d2c17d5a..6b043685bde64bb9b6abae7a58bec13d91b50c92 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--type(vm_memory_high_watermark() :: (float() | {'absolute', integer() | string()})).
--spec(start_link/1 :: (float()) -> rabbit_types:ok_pid_or_error()).
--spec(start_link/3 :: (float(), fun ((any()) -> 'ok'),
-                       fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error()).
--spec(get_total_memory/0 :: () -> (non_neg_integer() | 'unknown')).
--spec(get_vm_limit/0 :: () -> non_neg_integer()).
--spec(get_check_interval/0 :: () -> non_neg_integer()).
--spec(set_check_interval/1 :: (non_neg_integer()) -> 'ok').
--spec(get_vm_memory_high_watermark/0 :: () -> vm_memory_high_watermark()).
--spec(set_vm_memory_high_watermark/1 :: (vm_memory_high_watermark()) -> 'ok').
--spec(get_memory_limit/0 :: () -> non_neg_integer()).
-
--endif.
+-type vm_memory_high_watermark() :: (float() | {'absolute', integer() | string()}).
+-spec start_link(float()) -> rabbit_types:ok_pid_or_error().
+-spec start_link(float(), fun ((any()) -> 'ok'),
+                       fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error().
+-spec get_total_memory() -> (non_neg_integer() | 'unknown').
+-spec get_vm_limit() -> non_neg_integer().
+-spec get_check_interval() -> non_neg_integer().
+-spec set_check_interval(non_neg_integer()) -> 'ok'.
+-spec get_vm_memory_high_watermark() -> vm_memory_high_watermark().
+-spec set_vm_memory_high_watermark(vm_memory_high_watermark()) -> 'ok'.
+-spec get_memory_limit() -> non_neg_integer().
 
 %%----------------------------------------------------------------------------
 %% Public API
index 082e92446bda287582498290a0d0dd70b4a1bd5a..c0be486f5fbaf72df6f598d73ceddd5d9303a6f1 100644 (file)
@@ -18,8 +18,8 @@
 
 %% Generic worker pool manager.
 %%
-%% Submitted jobs are functions. They can be executed asynchronously
-%% (using worker_pool:submit/1, worker_pool:submit/2) or synchronously
+%% Submitted jobs are functions. They can be executed synchronously
+%% (using worker_pool:submit/1, worker_pool:submit/2) or asynchronously
 %% (using worker_pool:submit_async/1).
 %%
 %% We typically use the worker pool if we want to limit the maximum
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
+-type mfargs() :: {atom(), atom(), [any()]}.
 
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--spec(start_link/1 :: (atom()) -> {'ok', pid()} | {'error', any()}).
--spec(submit/1 :: (fun (() -> A) | mfargs()) -> A).
--spec(submit/2 :: (fun (() -> A) | mfargs(), 'reuse' | 'single') -> A).
--spec(submit/3 :: (atom(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A).
--spec(submit_async/1 :: (fun (() -> any()) | mfargs()) -> 'ok').
--spec(ready/2 :: (atom(), pid()) -> 'ok').
--spec(idle/2 :: (atom(), pid()) -> 'ok').
--spec(default_pool/0 :: () -> atom()).
-
--endif.
+-spec start_link(atom()) -> {'ok', pid()} | {'error', any()}.
+-spec submit(fun (() -> A) | mfargs()) -> A.
+-spec submit(fun (() -> A) | mfargs(), 'reuse' | 'single') -> A.
+-spec submit(atom(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A.
+-spec submit_async(fun (() -> any()) | mfargs()) -> 'ok'.
+-spec ready(atom(), pid()) -> 'ok'.
+-spec idle(atom(), pid()) -> 'ok'.
+-spec default_pool() -> atom().
 
 %%----------------------------------------------------------------------------
 
index d846c262466a3307ae86c200cc6aa8ba8e5b0aac..f4ed4d70c2427d71540153917d00c39db36a5676 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_link/1 :: (non_neg_integer()) -> rabbit_types:ok_pid_or_error()).
--spec(start_link/2 :: (non_neg_integer(), atom())
-                   -> rabbit_types:ok_pid_or_error()).
-
--endif.
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec start_link(non_neg_integer()) -> rabbit_types:ok_pid_or_error().
+-spec start_link(non_neg_integer(), atom())
+                   -> rabbit_types:ok_pid_or_error().
 
 %%----------------------------------------------------------------------------
 
@@ -48,7 +44,11 @@ start_link(WCount, PoolName) ->
 %%----------------------------------------------------------------------------
 
 init([WCount, PoolName]) ->
-    {ok, {{one_for_one, 10, 10},
+    %% we want to survive up to 1K of worker restarts per second,
+    %% e.g. when a large worker pool used for network connections
+    %% encounters a network failure. This is the case in the LDAP authentication
+    %% backend plugin.
+    {ok, {{one_for_one, 1000, 1},
           [{worker_pool, {worker_pool, start_link, [PoolName]}, transient,
             16#ffffffff, worker, [worker_pool]} |
            [{N, {worker_pool_worker, start_link, [PoolName]}, transient,
index 259af5e4a2f7a27e6c9491936fe2dd266a1cc360..bd07f0d782ad7b68afa861ae9361504a62211367 100644 (file)
 
 %%----------------------------------------------------------------------------
 
--ifdef(use_specs).
-
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--spec(start_link/1 :: (atom) -> {'ok', pid()} | {'error', any()}).
--spec(next_job_from/2 :: (pid(), pid()) -> 'ok').
--spec(submit/3 :: (pid(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A).
--spec(submit_async/2 :: (pid(), fun (() -> any()) | mfargs()) -> 'ok').
--spec(run/1 :: (fun (() -> A)) -> A; (mfargs()) -> any()).
--spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
-
--endif.
+-type mfargs() :: {atom(), atom(), [any()]}.
+
+-spec start_link(atom) -> {'ok', pid()} | {'error', any()}.
+-spec next_job_from(pid(), pid()) -> 'ok'.
+-spec submit(pid(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A.
+-spec submit_async(pid(), fun (() -> any()) | mfargs()) -> 'ok'.
+-spec run(fun (() -> A)) -> A; (mfargs()) -> any().
+-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'.
 
 %%----------------------------------------------------------------------------
 
diff --git a/rabbitmq-server/test/channel_operation_timeout_SUITE.erl b/rabbitmq-server/test/channel_operation_timeout_SUITE.erl
new file mode 100644 (file)
index 0000000..7b41b9c
--- /dev/null
@@ -0,0 +1,196 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(channel_operation_timeout_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile([export_all]).
+
+-import(rabbit_misc, [pget/2]).
+
+-define(CONFIG, [cluster_ab]).
+-define(DEFAULT_VHOST, <<"/">>).
+-define(QRESOURCE(Q), rabbit_misc:r(?DEFAULT_VHOST, queue, Q)).
+-define(TIMEOUT_TEST_MSG,   <<"timeout_test_msg!">>).
+-define(DELAY,   25).
+
+all() ->
+    [
+      notify_down_all
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = 2,
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, ClusterSize},
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+notify_down_all(Config) ->
+    Rabbit = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    RabbitCh = rabbit_ct_client_helpers:open_channel(Config, 0),
+    HareCh = rabbit_ct_client_helpers:open_channel(Config, 1),
+
+    %% success
+    set_channel_operation_timeout_config(Config, 1000),
+    configure_bq(Config),
+    QCfg0    = qconfig(RabbitCh, <<"q0">>, <<"ex0">>, true, false),
+    declare(QCfg0),
+    %% Testing rabbit_amqqueue:notify_down_all via rabbit_channel.
+    %% Consumer count = 0 after correct channel termination and
+%% notification of queues via delegate:call/3
+    true = (0 =/= length(get_consumers(Config, Rabbit, ?DEFAULT_VHOST))),
+    rabbit_ct_client_helpers:close_channel(RabbitCh),
+    0 = length(get_consumers(Config, Rabbit, ?DEFAULT_VHOST)),
+    false = is_process_alive(RabbitCh),
+
+    %% fail
+    set_channel_operation_timeout_config(Config, 10),
+    QCfg2 = qconfig(HareCh, <<"q1">>, <<"ex1">>, true, false),
+    declare(QCfg2),
+    publish(QCfg2, ?TIMEOUT_TEST_MSG),
+    timer:sleep(?DELAY),
+    rabbit_ct_client_helpers:close_channel(HareCh),
+    timer:sleep(?DELAY),
+    false = is_process_alive(HareCh),
+
+    pass.
+
+%% -------------------------
+%% Internal helper functions
+%% -------------------------
+
+set_channel_operation_timeout_config(Config, Timeout) ->
+    [ok = Ret
+     || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+       application, set_env, [rabbit, channel_operation_timeout, Timeout])],
+    ok.
+
+set_channel_operation_backing_queue(Config) ->
+    [ok = Ret
+     || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+       application, set_env,
+       [rabbit, backing_queue_module, channel_operation_timeout_test_queue])],
+    ok.
+
+re_enable_priority_queue(Config) ->
+    [ok = Ret
+     || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+       rabbit_priority_queue, enable, [])],
+    ok.
+
+declare(QCfg) ->
+    QDeclare = #'queue.declare'{queue = Q = pget(name, QCfg), durable = true},
+    #'queue.declare_ok'{} = amqp_channel:call(Ch = pget(ch, QCfg), QDeclare),
+
+    ExDeclare =  #'exchange.declare'{exchange = Ex = pget(ex, QCfg)},
+    #'exchange.declare_ok'{} = amqp_channel:call(Ch, ExDeclare),
+
+    #'queue.bind_ok'{} =
+        amqp_channel:call(Ch, #'queue.bind'{queue       = Q,
+                                            exchange    = Ex,
+                                            routing_key = Q}),
+    maybe_subscribe(QCfg).
+
+maybe_subscribe(QCfg) ->
+    case pget(consume, QCfg) of
+        true ->
+            Sub = #'basic.consume'{queue  = pget(name, QCfg)},
+            Ch  = pget(ch, QCfg),
+            Del = pget(deliver, QCfg),
+            amqp_channel:subscribe(Ch, Sub,
+                                   spawn(fun() -> consume(Ch, Del) end));
+        _ ->  ok
+    end.
+
+consume(_Ch, false) -> receive_nothing();
+consume(Ch, Deliver = true) ->
+    receive
+        {#'basic.deliver'{}, _Msg} ->
+            consume(Ch, Deliver)
+    end.
+
+publish(QCfg, Msg) ->
+    Publish = #'basic.publish'{exchange = pget(ex, QCfg),
+                               routing_key = pget(name, QCfg)},
+    amqp_channel:call(pget(ch, QCfg), Publish,
+                      #amqp_msg{payload = Msg}).
+
+get_consumers(Config, Node, VHost) when is_atom(Node),
+                                        is_binary(VHost) ->
+    rabbit_ct_broker_helpers:rpc(Config, Node,
+      rabbit_amqqueue, consumers_all, [VHost]).
+
+get_amqqueue(Q, []) -> throw({not_found, Q});
+get_amqqueue(Q, [AMQQ = #amqqueue{name = Q} | _]) -> AMQQ;
+get_amqqueue(Q, [_| Rem]) -> get_amqqueue(Q, Rem).
+
+qconfig(Ch, Name, Ex, Consume, Deliver) ->
+    [{ch, Ch}, {name, Name}, {ex,Ex}, {consume, Consume}, {deliver, Deliver}].
+
+receive_nothing() ->
+    receive
+    after infinity -> void
+    end.
+
+unhandled_req(Fun) ->
+    try
+        Fun()
+    catch
+        exit:{{shutdown,{_, ?NOT_FOUND, _}}, _} -> ok;
+        _:Reason                                -> {error, Reason}
+    end.
+
+configure_bq(Config) ->
+    ok = set_channel_operation_backing_queue(Config),
+    ok = re_enable_priority_queue(Config),
+    ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config,
+      ?MODULE).
diff --git a/rabbitmq-server/test/channel_operation_timeout_test_queue.erl b/rabbitmq-server/test/channel_operation_timeout_test_queue.erl
new file mode 100644 (file)
index 0000000..0bb3f5a
--- /dev/null
@@ -0,0 +1,2439 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(channel_operation_timeout_test_queue).
+
+-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
+         purge/1, purge_acks/1,
+         publish/6, publish_delivered/5,
+         batch_publish/4, batch_publish_delivered/4,
+         discard/4, drain_confirmed/1,
+         dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
+         ackfold/4, fold/3, len/1, is_empty/1, depth/1,
+         set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
+         handle_pre_hibernate/1, resume/1, msg_rates/1,
+         info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+         zip_msgs_and_acks/4,  multiple_routing_keys/0]).
+
+-export([start/1, stop/0]).
+
+%% exported for testing only
+-export([start_msg_store/2, stop_msg_store/0, init/6]).
+
+%%----------------------------------------------------------------------------
+%% This test backing queue follows the variable queue implementation, with
+%% the exception that it will introduce infinite delays on some operations if
+%% the test message has been published, and is awaiting acknowledgement in the
+%% queue index. Test message is "timeout_test_msg!".
+%%
+%%----------------------------------------------------------------------------
+
+-behaviour(rabbit_backing_queue).
+
+-record(vqstate,
+        { q1,
+          q2,
+          delta,
+          q3,
+          q4,
+          next_seq_id,
+          ram_pending_ack,    %% msgs using store, still in RAM
+          disk_pending_ack,   %% msgs in store, paged out
+          qi_pending_ack,     %% msgs using qi, *can't* be paged out
+          index_state,
+          msg_store_clients,
+          durable,
+          transient_threshold,
+          qi_embed_msgs_below,
+
+          len,                %% w/o unacked
+          bytes,              %% w/o unacked
+          unacked_bytes,
+          persistent_count,   %% w   unacked
+          persistent_bytes,   %% w   unacked
+
+          target_ram_count,
+          ram_msg_count,      %% w/o unacked
+          ram_msg_count_prev,
+          ram_ack_count_prev,
+          ram_bytes,          %% w   unacked
+          out_counter,
+          in_counter,
+          rates,
+          msgs_on_disk,
+          msg_indices_on_disk,
+          unconfirmed,
+          confirmed,
+          ack_out_counter,
+          ack_in_counter,
+          %% Unlike the other counters these two do not feed into
+          %% #rates{} and get reset
+          disk_read_count,
+          disk_write_count,
+
+          io_batch_size,
+
+          %% default queue or lazy queue
+          mode
+        }).
+
+-record(rates, { in, out, ack_in, ack_out, timestamp }).
+
+-record(msg_status,
+        { seq_id,
+          msg_id,
+          msg,
+          is_persistent,
+          is_delivered,
+          msg_in_store,
+          index_on_disk,
+          persist_to,
+          msg_props
+        }).
+
+-record(delta,
+        { start_seq_id, %% start_seq_id is inclusive
+          count,
+          end_seq_id    %% end_seq_id is exclusive
+        }).
+
+-define(HEADER_GUESS_SIZE, 100). %% see determine_persist_to/2
+-define(PERSISTENT_MSG_STORE, msg_store_persistent).
+-define(TRANSIENT_MSG_STORE,  msg_store_transient).
+-define(QUEUE, lqueue).
+-define(TIMEOUT_TEST_MSG, <<"timeout_test_msg!">>).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+%%----------------------------------------------------------------------------
+
+-rabbit_upgrade({multiple_routing_keys, local, []}).
+
+-type seq_id()  :: non_neg_integer().
+
+-type rates() :: #rates { in        :: float(),
+                          out       :: float(),
+                          ack_in    :: float(),
+                          ack_out   :: float(),
+                          timestamp :: rabbit_types:timestamp()}.
+
+-type delta() :: #delta { start_seq_id :: non_neg_integer(),
+                          count        :: non_neg_integer(),
+                          end_seq_id   :: non_neg_integer() }.
+
+%% The compiler (rightfully) complains that ack() and state() are
+%% unused. For this reason we duplicate a -spec from
+%% rabbit_backing_queue with the only intent being to remove
+%% warnings. The problem here is that we can't parameterise the BQ
+%% behaviour by these two types as we would like to. We still leave
+%% these here for documentation purposes.
+-type ack() :: seq_id().
+-type state() :: #vqstate {
+             q1                    :: ?QUEUE:?QUEUE(),
+             q2                    :: ?QUEUE:?QUEUE(),
+             delta                 :: delta(),
+             q3                    :: ?QUEUE:?QUEUE(),
+             q4                    :: ?QUEUE:?QUEUE(),
+             next_seq_id           :: seq_id(),
+             ram_pending_ack       :: gb_trees:tree(),
+             disk_pending_ack      :: gb_trees:tree(),
+             qi_pending_ack        :: gb_trees:tree(),
+             index_state           :: any(),
+             msg_store_clients     :: 'undefined' | {{any(), binary()},
+                                                    {any(), binary()}},
+             durable               :: boolean(),
+             transient_threshold   :: non_neg_integer(),
+             qi_embed_msgs_below   :: non_neg_integer(),
+
+             len                   :: non_neg_integer(),
+             bytes                 :: non_neg_integer(),
+             unacked_bytes         :: non_neg_integer(),
+
+             persistent_count      :: non_neg_integer(),
+             persistent_bytes      :: non_neg_integer(),
+
+             target_ram_count      :: non_neg_integer() | 'infinity',
+             ram_msg_count         :: non_neg_integer(),
+             ram_msg_count_prev    :: non_neg_integer(),
+             ram_ack_count_prev    :: non_neg_integer(),
+             ram_bytes             :: non_neg_integer(),
+             out_counter           :: non_neg_integer(),
+             in_counter            :: non_neg_integer(),
+             rates                 :: rates(),
+             msgs_on_disk          :: ?GB_SET_TYPE(),
+             msg_indices_on_disk   :: ?GB_SET_TYPE(),
+             unconfirmed           :: ?GB_SET_TYPE(),
+             confirmed             :: ?GB_SET_TYPE(),
+             ack_out_counter       :: non_neg_integer(),
+             ack_in_counter        :: non_neg_integer(),
+             disk_read_count       :: non_neg_integer(),
+             disk_write_count      :: non_neg_integer(),
+
+             io_batch_size         :: pos_integer(),
+             mode                  :: 'default' | 'lazy' }.
+%% Duplicated from rabbit_backing_queue
+-spec ack([ack()], state()) -> {[rabbit_guid:guid()], state()}.
+
+-spec multiple_routing_keys() -> 'ok'.
+
+-define(BLANK_DELTA, #delta { start_seq_id = undefined,
+                              count        = 0,
+                              end_seq_id   = undefined }).
+-define(BLANK_DELTA_PATTERN(Z), #delta { start_seq_id = Z,
+                                         count        = 0,
+                                         end_seq_id   = Z }).
+
+-define(MICROS_PER_SECOND, 1000000.0).
+
+%% We're sampling every 5s for RAM duration; a half life that is of
+%% the same order of magnitude is probably about right.
+-define(RATE_AVG_HALF_LIFE, 5.0).
+
+%% We will recalculate the #rates{} every time we get asked for our
+%% RAM duration, or every N messages published, whichever is
+%% sooner. We do this since the priority calculations in
+%% rabbit_amqqueue_process need fairly fresh rates.
+-define(MSGS_PER_RATE_CALC, 100).
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+start(DurableQueues) ->
+    {AllTerms, StartFunState} = rabbit_queue_index:start(DurableQueues),
+    start_msg_store(
+      [Ref || Terms <- AllTerms,
+              Terms /= non_clean_shutdown,
+              begin
+                  Ref = proplists:get_value(persistent_ref, Terms),
+                  Ref =/= undefined
+              end],
+      StartFunState),
+    {ok, AllTerms}.
+
+stop() ->
+    ok = stop_msg_store(),
+    ok = rabbit_queue_index:stop().
+
+start_msg_store(Refs, StartFunState) ->
+    ok = rabbit_sup:start_child(?TRANSIENT_MSG_STORE, rabbit_msg_store,
+                                [?TRANSIENT_MSG_STORE, rabbit_mnesia:dir(),
+                                 undefined,  {fun (ok) -> finished end, ok}]),
+    ok = rabbit_sup:start_child(?PERSISTENT_MSG_STORE, rabbit_msg_store,
+                                [?PERSISTENT_MSG_STORE, rabbit_mnesia:dir(),
+                                 Refs, StartFunState]).
+
+stop_msg_store() ->
+    ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE),
+    ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE).
+
+init(Queue, Recover, Callback) ->
+    init(
+      Queue, Recover, Callback,
+      fun (MsgIds, ActionTaken) ->
+              msgs_written_to_disk(Callback, MsgIds, ActionTaken)
+      end,
+      fun (MsgIds) -> msg_indices_written_to_disk(Callback, MsgIds) end,
+      fun (MsgIds) -> msgs_and_indices_written_to_disk(Callback, MsgIds) end).
+
+init(#amqqueue { name = QueueName, durable = IsDurable }, new,
+     AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) ->
+    IndexState = rabbit_queue_index:init(QueueName,
+                                         MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+    init(IsDurable, IndexState, 0, 0, [],
+         case IsDurable of
+             true  -> msg_store_client_init(?PERSISTENT_MSG_STORE,
+                                            MsgOnDiskFun, AsyncCallback);
+             false -> undefined
+         end,
+         msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback));
+
+%% We can be recovering a transient queue if it crashed
+init(#amqqueue { name = QueueName, durable = IsDurable }, Terms,
+     AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) ->
+    {PRef, RecoveryTerms} = process_recovery_terms(Terms),
+    {PersistentClient, ContainsCheckFun} =
+        case IsDurable of
+            true  -> C = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef,
+                                               MsgOnDiskFun, AsyncCallback),
+                     {C, fun (MsgId) when is_binary(MsgId) ->
+                                 rabbit_msg_store:contains(MsgId, C);
+                             (#basic_message{is_persistent = Persistent}) ->
+                                 Persistent
+                         end};
+            false -> {undefined, fun(_MsgId) -> false end}
+        end,
+    TransientClient  = msg_store_client_init(?TRANSIENT_MSG_STORE,
+                                             undefined, AsyncCallback),
+    {DeltaCount, DeltaBytes, IndexState} =
+        rabbit_queue_index:recover(
+          QueueName, RecoveryTerms,
+          rabbit_msg_store:successfully_recovered_state(?PERSISTENT_MSG_STORE),
+          ContainsCheckFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+    init(IsDurable, IndexState, DeltaCount, DeltaBytes, RecoveryTerms,
+         PersistentClient, TransientClient).
+
+process_recovery_terms(Terms=non_clean_shutdown) ->
+    {rabbit_guid:gen(), Terms};
+process_recovery_terms(Terms) ->
+    case proplists:get_value(persistent_ref, Terms) of
+        undefined -> {rabbit_guid:gen(), []};
+        PRef      -> {PRef, Terms}
+    end.
+
+terminate(_Reason, State) ->
+    State1 = #vqstate { persistent_count  = PCount,
+                        persistent_bytes  = PBytes,
+                        index_state       = IndexState,
+                        msg_store_clients = {MSCStateP, MSCStateT} } =
+        purge_pending_ack(true, State),
+    PRef = case MSCStateP of
+               undefined -> undefined;
+               _         -> ok = rabbit_msg_store:client_terminate(MSCStateP),
+                            rabbit_msg_store:client_ref(MSCStateP)
+           end,
+    ok = rabbit_msg_store:client_delete_and_terminate(MSCStateT),
+    Terms = [{persistent_ref,   PRef},
+             {persistent_count, PCount},
+             {persistent_bytes, PBytes}],
+    a(State1 #vqstate { index_state       = rabbit_queue_index:terminate(
+                                              Terms, IndexState),
+                        msg_store_clients = undefined }).
+
+%% the only difference between purge and delete is that delete also
+%% needs to delete everything that's been delivered and not ack'd.
+delete_and_terminate(_Reason, State) ->
+    %% Normally when we purge messages we interact with the qi by
+    %% issues delivers and acks for every purged message. In this case
+    %% we don't need to do that, so we just delete the qi.
+    State1 = purge_and_index_reset(State),
+    State2 = #vqstate { msg_store_clients = {MSCStateP, MSCStateT} } =
+        purge_pending_ack_delete_and_terminate(State1),
+    case MSCStateP of
+        undefined -> ok;
+        _         -> rabbit_msg_store:client_delete_and_terminate(MSCStateP)
+    end,
+    rabbit_msg_store:client_delete_and_terminate(MSCStateT),
+    a(State2 #vqstate { msg_store_clients = undefined }).
+
+delete_crashed(#amqqueue{name = QName}) ->
+    ok = rabbit_queue_index:erase(QName).
+
+purge(State = #vqstate { len = Len, qi_pending_ack= QPA }) ->
+    maybe_delay(QPA),
+    case is_pending_ack_empty(State) of
+        true ->
+            {Len, purge_and_index_reset(State)};
+        false ->
+            {Len, purge_when_pending_acks(State)}
+    end.
+
+purge_acks(State) -> a(purge_pending_ack(false, State)).
+
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) ->
+    State1 =
+        publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
+                 fun maybe_write_to_disk/4,
+                 State),
+    a(reduce_memory_use(maybe_update_rates(State1))).
+
+batch_publish(Publishes, ChPid, Flow, State) ->
+    {ChPid, Flow, State1} =
+        lists:foldl(fun batch_publish1/2, {ChPid, Flow, State}, Publishes),
+    State2 = ui(State1),
+    a(reduce_memory_use(maybe_update_rates(State2))).
+
+publish_delivered(Msg, MsgProps, ChPid, Flow, State) ->
+    {SeqId, State1} =
+        publish_delivered1(Msg, MsgProps, ChPid, Flow,
+                           fun maybe_write_to_disk/4,
+                           State),
+    {SeqId, a(reduce_memory_use(maybe_update_rates(State1)))}.
+
+batch_publish_delivered(Publishes, ChPid, Flow, State) ->
+    {ChPid, Flow, SeqIds, State1} =
+        lists:foldl(fun batch_publish_delivered1/2,
+                    {ChPid, Flow, [], State}, Publishes),
+    State2 = ui(State1),
+    {lists:reverse(SeqIds), a(reduce_memory_use(maybe_update_rates(State2)))}.
+
+discard(_MsgId, _ChPid, _Flow, State) -> State.
+
+%% Return the accumulated set of confirmed msg ids and reset it. The
+%% empty-set head avoids rebuilding the state record in the common
+%% case where there is nothing to drain.
+drain_confirmed(State = #vqstate { confirmed = C }) ->
+    case gb_sets:is_empty(C) of
+        true  -> {[], State}; %% common case
+        false -> {gb_sets:to_list(C), State #vqstate {
+                                        confirmed = gb_sets:new() }}
+    end.
+
+%% Drop messages from the head of the queue while Pred holds; returns
+%% the properties of the first message that fails the predicate (or
+%% whatever remove_by_predicate/2 yields when the queue is exhausted).
+dropwhile(Pred, State) ->
+    {MsgProps, State1} =
+        remove_by_predicate(Pred, State),
+    {MsgProps, a(State1)}.
+
+%% Like dropwhile/2, but each removed message is folded through Fun
+%% with accumulator Acc instead of being thrown away.
+fetchwhile(Pred, Fun, Acc, State) ->
+    {MsgProps, Acc1, State1} =
+         fetch_by_predicate(Pred, Fun, Acc, State),
+    {MsgProps, Acc1, a(State1)}.
+
+%% Take the next ready message off the queue, loading its body from
+%% the message store if it is not in RAM. AckRequired controls whether
+%% an ack tag is produced by remove/3.
+fetch(AckRequired, State) ->
+    case queue_out(State) of
+        {empty, State1} ->
+            {empty, a(State1)};
+        {{value, MsgStatus}, State1} ->
+            %% it is possible that the message wasn't read from disk
+            %% at this point, so read it in.
+            {Msg, State2} = read_msg(MsgStatus, State1),
+            {AckTag, State3} = remove(AckRequired, MsgStatus, State2),
+            {{Msg, MsgStatus#msg_status.is_delivered, AckTag}, a(State3)}
+    end.
+
+%% As fetch/2 but without reading the message body: only the msg id
+%% (and optional ack tag) of the removed head message is returned.
+drop(AckRequired, State) ->
+    case queue_out(State) of
+        {empty, State1} ->
+            {empty, a(State1)};
+        {{value, MsgStatus}, State1} ->
+            {AckTag, State2} = remove(AckRequired, MsgStatus, State1),
+            {{MsgStatus#msg_status.msg_id, AckTag}, a(State2)}
+    end.
+
+%% Acknowledge the given ack tags (SeqIds): remove them from the
+%% pending-ack structures, ack them in the queue index and delete the
+%% corresponding messages from the message store. Returns the msg ids
+%% acked, in the order the tags were supplied.
+ack([], State) ->
+    {[], State};
+%% optimisation: this head is essentially a partial evaluation of the
+%% general case below, for the single-ack case.
+ack([SeqId], State) ->
+    {#msg_status { msg_id        = MsgId,
+                   is_persistent = IsPersistent,
+                   msg_in_store  = MsgInStore,
+                   index_on_disk = IndexOnDisk },
+     State1 = #vqstate { index_state       = IndexState,
+                         msg_store_clients = MSCState,
+                         ack_out_counter   = AckOutCount }} =
+        remove_pending_ack(true, SeqId, State),
+    %% Only touch the index / msg store for the parts of this message
+    %% that actually live on disk.
+    IndexState1 = case IndexOnDisk of
+                      true  -> rabbit_queue_index:ack([SeqId], IndexState);
+                      false -> IndexState
+                  end,
+    case MsgInStore of
+        true  -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
+        false -> ok
+    end,
+    {[MsgId],
+     a(State1 #vqstate { index_state      = IndexState1,
+                         ack_out_counter  = AckOutCount + 1 })};
+ack(AckTags, State) ->
+    %% General case: accumulate per-store msg ids and on-disk SeqIds
+    %% across all tags, then do one index ack and one batched store
+    %% removal. AllMsgIds is accumulated in reverse by the fold.
+    {{IndexOnDiskSeqIds, MsgIdsByStore, AllMsgIds},
+     State1 = #vqstate { index_state       = IndexState,
+                         msg_store_clients = MSCState,
+                         ack_out_counter   = AckOutCount }} =
+        lists:foldl(
+          fun (SeqId, {Acc, State2}) ->
+                  {MsgStatus, State3} = remove_pending_ack(true, SeqId, State2),
+                  {accumulate_ack(MsgStatus, Acc), State3}
+          end, {accumulate_ack_init(), State}, AckTags),
+    IndexState1 = rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
+    remove_msgs_by_id(MsgIdsByStore, MSCState),
+    {lists:reverse(AllMsgIds),
+     a(State1 #vqstate { index_state      = IndexState1,
+                         ack_out_counter  = AckOutCount + length(AckTags) })}.
+
+%% Put previously-delivered (but unacked) messages back on the queue.
+%% Ack tags are sorted and merged back into the in-RAM queues first
+%% (q4 then q3 for default mode), with anything older than the current
+%% beta/delta boundaries folded into delta. len/in_counter grow by the
+%% number of requeued messages. NOTE(review): maybe_delay/1 is defined
+%% elsewhere (added by the backported health-check patches) — it is
+%% handed the qi pending acks; confirm its semantics there.
+requeue(AckTags, #vqstate { mode          = default,
+                            delta         = Delta,
+                            q3            = Q3,
+                            q4            = Q4,
+                            in_counter    = InCounter,
+                            len           = Len,
+                            qi_pending_ack = QPA } = State) ->
+    maybe_delay(QPA),
+    {SeqIds,  Q4a, MsgIds,  State1} = queue_merge(lists:sort(AckTags), Q4, [],
+                                                  beta_limit(Q3),
+                                                  fun publish_alpha/2, State),
+    {SeqIds1, Q3a, MsgIds1, State2} = queue_merge(SeqIds, Q3, MsgIds,
+                                                  delta_limit(Delta),
+                                                  fun publish_beta/2, State1),
+    {Delta1, MsgIds2, State3}       = delta_merge(SeqIds1, Delta, MsgIds1,
+                                                  State2),
+    MsgCount = length(MsgIds2),
+    {MsgIds2, a(reduce_memory_use(
+                  maybe_update_rates(
+                    State3 #vqstate { delta      = Delta1,
+                                      q3         = Q3a,
+                                      q4         = Q4a,
+                                      in_counter = InCounter + MsgCount,
+                                      len        = Len + MsgCount })))};
+%% Lazy-mode variant: there is no q4, so merging goes straight into q3
+%% and then delta.
+requeue(AckTags, #vqstate { mode          = lazy,
+                            delta         = Delta,
+                            q3            = Q3,
+                            in_counter    = InCounter,
+                            len           = Len,
+                            qi_pending_ack = QPA } = State) ->
+    maybe_delay(QPA),
+    {SeqIds, Q3a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q3, [],
+                                                delta_limit(Delta),
+                                                fun publish_beta/2, State),
+    {Delta1, MsgIds1, State2}     = delta_merge(SeqIds, Delta, MsgIds,
+                                                State1),
+    MsgCount = length(MsgIds1),
+    {MsgIds1, a(reduce_memory_use(
+                  maybe_update_rates(
+                    State2 #vqstate { delta      = Delta1,
+                                      q3         = Q3a,
+                                      in_counter = InCounter + MsgCount,
+                                      len        = Len + MsgCount })))}.
+
+%% Fold MsgFun over the (pending-ack) messages identified by AckTags,
+%% reading each message body in if necessary. The acks themselves are
+%% left in place — only the accumulator and (possibly) read-cache
+%% state change.
+ackfold(MsgFun, Acc, State, AckTags) ->
+    {AccN, StateN} =
+        lists:foldl(fun(SeqId, {Acc0, State0}) ->
+                            MsgStatus = lookup_pending_ack(SeqId, State0),
+                            {Msg, State1} = read_msg(MsgStatus, State0),
+                            {MsgFun(Msg, SeqId, Acc0), State1}
+                    end, {Acc, State}, AckTags),
+    {AccN, a(StateN)}.
+
+%% Fold Fun over every message in the queue, ready and pending-ack
+%% alike, via a set of iterators (in-RAM queues, disk acks, RAM acks,
+%% qi acks) advanced lazily by inext/2 / ifold/4.
+fold(Fun, Acc, State = #vqstate{index_state = IndexState}) ->
+    {Its, IndexState1} = lists:foldl(fun inext/2, {[], IndexState},
+                                     [msg_iterator(State),
+                                      disk_ack_iterator(State),
+                                      ram_ack_iterator(State),
+                                      qi_ack_iterator(State)]),
+    ifold(Fun, Acc, Its, State#vqstate{index_state = IndexState1}).
+
+%% Number of ready messages. See the requeue/2 note on maybe_delay/1.
+len(#vqstate { len = Len, qi_pending_ack = QPA }) ->
+    maybe_delay(QPA),
+    Len.
+
+%% True iff there are no ready messages (pending acks don't count).
+is_empty(State) -> 0 == len(State).
+
+%% Ready messages plus messages awaiting acknowledgement.
+depth(State) ->
+    len(State) + count_pending_acks(State).
+
+%% Convert a target queue duration (seconds, or 'infinity') into a
+%% target number of in-RAM messages using the combined in/out/ack
+%% rates, and page messages out immediately if the new target is
+%% tighter than the old one.
+set_ram_duration_target(
+  DurationTarget, State = #vqstate {
+                    rates = #rates { in      = AvgIngressRate,
+                                     out     = AvgEgressRate,
+                                     ack_in  = AvgAckIngressRate,
+                                     ack_out = AvgAckEgressRate },
+                    target_ram_count = TargetRamCount }) ->
+    Rate =
+        AvgEgressRate + AvgIngressRate + AvgAckEgressRate + AvgAckIngressRate,
+    TargetRamCount1 =
+        case DurationTarget of
+            infinity  -> infinity;
+            _         -> trunc(DurationTarget * Rate) %% msgs = sec * msgs/sec
+        end,
+    State1 = State #vqstate { target_ram_count = TargetRamCount1 },
+    %% Only reduce memory when the target got stricter; a looser (or
+    %% infinite) target requires no immediate paging.
+    a(case TargetRamCount1 == infinity orelse
+          (TargetRamCount =/= infinity andalso
+           TargetRamCount1 >= TargetRamCount) of
+          true  -> State1;
+          false -> reduce_memory_use(State1)
+      end).
+
+%% Recompute the moving-average rates once enough messages have moved
+%% through the queue since the last calculation.
+maybe_update_rates(State = #vqstate{ in_counter  = InCount,
+                                     out_counter = OutCount })
+  when InCount + OutCount > ?MSGS_PER_RATE_CALC ->
+    update_rates(State);
+maybe_update_rates(State) ->
+    State.
+
+%% Fold the four message counters into their moving-average rates
+%% (msgs/sec) and reset the counters to zero. Timestamps are in native
+%% monotonic time units.
+update_rates(State = #vqstate{ in_counter      =     InCount,
+                               out_counter     =    OutCount,
+                               ack_in_counter  =  AckInCount,
+                               ack_out_counter = AckOutCount,
+                               rates = #rates{ in        =     InRate,
+                                               out       =    OutRate,
+                                               ack_in    =  AckInRate,
+                                               ack_out   = AckOutRate,
+                                               timestamp = TS }}) ->
+    Now = time_compat:monotonic_time(),
+
+    Rates = #rates { in        = update_rate(Now, TS,     InCount,     InRate),
+                     out       = update_rate(Now, TS,    OutCount,    OutRate),
+                     ack_in    = update_rate(Now, TS,  AckInCount,  AckInRate),
+                     ack_out   = update_rate(Now, TS, AckOutCount, AckOutRate),
+                     timestamp = Now },
+
+    State#vqstate{ in_counter      = 0,
+                   out_counter     = 0,
+                   ack_in_counter  = 0,
+                   ack_out_counter = 0,
+                   rates           = Rates }.
+
+%% Exponentially-weighted moving average of Count events over the
+%% elapsed time (converted to seconds). If no time has passed we keep
+%% the previous rate rather than divide by zero.
+update_rate(Now, TS, Count, Rate) ->
+    Time = time_compat:convert_time_unit(Now - TS, native, micro_seconds) /
+        ?MICROS_PER_SECOND,
+    if
+        Time == 0 -> Rate;
+        true      -> rabbit_misc:moving_average(Time, ?RATE_AVG_HALF_LIFE,
+                                                Count / Time, Rate)
+    end.
+
+%% Estimate how many seconds of messages are currently held in RAM:
+%% (RAM msgs + RAM acks, averaged over this and the previous sample) /
+%% combined throughput. Returns 'infinity' when all rates are ~0 so
+%% the memory monitor doesn't divide by noise.
+ram_duration(State) ->
+    State1 = #vqstate { rates = #rates { in      = AvgIngressRate,
+                                         out     = AvgEgressRate,
+                                         ack_in  = AvgAckIngressRate,
+                                         ack_out = AvgAckEgressRate },
+                        ram_msg_count      = RamMsgCount,
+                        ram_msg_count_prev = RamMsgCountPrev,
+                        ram_pending_ack    = RPA,
+                        qi_pending_ack     = QPA,
+                        ram_ack_count_prev = RamAckCountPrev } =
+        update_rates(State),
+
+    %% Acks held in RAM live in two gb_trees: plain RAM acks and
+    %% index-embedded (qi) acks.
+    RamAckCount = gb_trees:size(RPA) + gb_trees:size(QPA),
+
+    Duration = %% msgs+acks / (msgs+acks/sec) == sec
+        case lists:all(fun (X) -> X < 0.01 end,
+                       [AvgEgressRate, AvgIngressRate,
+                        AvgAckEgressRate, AvgAckIngressRate]) of
+            true  -> infinity;
+            false -> (RamMsgCountPrev + RamMsgCount +
+                          RamAckCount + RamAckCountPrev) /
+                         (4 * (AvgEgressRate + AvgIngressRate +
+                                   AvgAckEgressRate + AvgAckIngressRate))
+        end,
+
+    {Duration, State1}.
+
+%% Tell the queue process whether a timer callback is needed: 'timed'
+%% when confirms are waiting on an index sync, 'idle' for other
+%% pending syncs, 'false' when the index is clean.
+needs_timeout(#vqstate { index_state = IndexState }) ->
+    case rabbit_queue_index:needs_sync(IndexState) of
+        confirms -> timed;
+        other    -> idle;
+        false    -> false
+    end.
+
+%% Timer callback: sync the queue index.
+timeout(State = #vqstate { index_state = IndexState }) ->
+    State #vqstate { index_state = rabbit_queue_index:sync(IndexState) }.
+
+%% Flush the queue index before the queue process hibernates.
+handle_pre_hibernate(State = #vqstate { index_state = IndexState }) ->
+    State #vqstate { index_state = rabbit_queue_index:flush(IndexState) }.
+
+%% Called when msg_store credit is restored: resume paging to disk.
+resume(State) -> a(reduce_memory_use(State)).
+
+%% Current {ingress, egress} message rates, for the rate monitor.
+msg_rates(#vqstate { rates = #rates { in  = AvgIngressRate,
+                                      out = AvgEgressRate } }) ->
+    {AvgIngressRate, AvgEgressRate}.
+
+%% Queryable per-queue metrics, one clause per info item. Unknown
+%% items fall through to the final clause and throw {bad_argument, _}.
+info(messages_ready_ram, #vqstate{ram_msg_count = RamMsgCount}) ->
+    RamMsgCount;
+info(messages_unacknowledged_ram, #vqstate{ram_pending_ack = RPA,
+                                           qi_pending_ack  = QPA}) ->
+    gb_trees:size(RPA) + gb_trees:size(QPA);
+info(messages_ram, State) ->
+    info(messages_ready_ram, State) + info(messages_unacknowledged_ram, State);
+info(messages_persistent, #vqstate{persistent_count = PersistentCount}) ->
+    PersistentCount;
+info(message_bytes, #vqstate{bytes         = Bytes,
+                             unacked_bytes = UBytes}) ->
+    Bytes + UBytes;
+info(message_bytes_ready, #vqstate{bytes = Bytes}) ->
+    Bytes;
+info(message_bytes_unacknowledged, #vqstate{unacked_bytes = UBytes}) ->
+    UBytes;
+info(message_bytes_ram, #vqstate{ram_bytes = RamBytes}) ->
+    RamBytes;
+info(message_bytes_persistent, #vqstate{persistent_bytes = PersistentBytes}) ->
+    PersistentBytes;
+info(head_message_timestamp, #vqstate{
+          q3               = Q3,
+          q4               = Q4,
+          ram_pending_ack  = RPA,
+          qi_pending_ack   = QPA}) ->
+          head_message_timestamp(Q3, Q4, RPA, QPA);
+info(disk_reads, #vqstate{disk_read_count = Count}) ->
+    Count;
+info(disk_writes, #vqstate{disk_write_count = Count}) ->
+    Count;
+%% Aggregate snapshot of the internal queue structure, mostly useful
+%% for debugging/monitoring.
+info(backing_queue_status, #vqstate {
+          q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+          mode             = Mode,
+          len              = Len,
+          target_ram_count = TargetRamCount,
+          next_seq_id      = NextSeqId,
+          rates            = #rates { in      = AvgIngressRate,
+                                      out     = AvgEgressRate,
+                                      ack_in  = AvgAckIngressRate,
+                                      ack_out = AvgAckEgressRate }}) ->
+
+    [ {mode                , Mode},
+      {q1                  , ?QUEUE:len(Q1)},
+      {q2                  , ?QUEUE:len(Q2)},
+      {delta               , Delta},
+      {q3                  , ?QUEUE:len(Q3)},
+      {q4                  , ?QUEUE:len(Q4)},
+      {len                 , Len},
+      {target_ram_count    , TargetRamCount},
+      {next_seq_id         , NextSeqId},
+      {avg_ingress_rate    , AvgIngressRate},
+      {avg_egress_rate     , AvgEgressRate},
+      {avg_ack_ingress_rate, AvgAckIngressRate},
+      {avg_ack_egress_rate , AvgAckEgressRate} ];
+info(Item, _) ->
+    throw({bad_argument, Item}).
+
+%% Run a callback targeted at this backing-queue module (the shape
+%% produced by e.g. msg_store_close_fds_fun/1); callbacks for other
+%% modules are ignored.
+invoke(?MODULE, Fun, State) -> Fun(?MODULE, State);
+invoke(      _,   _, State) -> State.
+
+%% This backing queue performs no duplicate detection.
+is_duplicate(_Msg, State) -> {false, State}.
+
+%% Switch the queue between 'default' and 'lazy' modes. Switching to
+%% the current mode is a no-op (first clause).
+set_queue_mode(Mode, State = #vqstate { mode = Mode }) ->
+    State;
+set_queue_mode(lazy, State = #vqstate {
+                                target_ram_count = TargetRamCount }) ->
+    %% To become a lazy queue we need to page everything to disk first.
+    State1 = convert_to_lazy(State),
+    %% restore the original target_ram_count
+    a(State1 #vqstate { mode = lazy, target_ram_count = TargetRamCount });
+set_queue_mode(default, State) ->
+    %% becoming a default queue means loading messages from disk like
+    %% when a queue is recovered.
+    a(maybe_deltas_to_betas(State #vqstate { mode = default }));
+set_queue_mode(_, State) ->
+    State.
+
+%% Pair up published messages with their ack tags (positional zip),
+%% prepending {MsgId, AckTag} tuples onto Accumulator.
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, _State) ->
+    lists:foldl(fun ({{#basic_message{ id = Id }, _Props}, AckTag}, Acc) ->
+                        [{Id, AckTag} | Acc]
+                end, Accumulator, lists:zip(Msgs, AckTags)).
+
+%% Page every ready message to disk by forcing a zero RAM-duration
+%% target, looping (with credit-flow waits) until delta + q3 account
+%% for the whole queue length.
+convert_to_lazy(State) ->
+    State1 = #vqstate { delta = Delta, q3 = Q3, len = Len } =
+        set_ram_duration_target(0, State),
+    case Delta#delta.count + ?QUEUE:len(Q3) == Len of
+        true ->
+            State1;
+        false ->
+            %% When pushing messages to disk, we might have been
+            %% blocked by the msg_store, so we need to see if we have
+            %% to wait for more credit, and then keep paging messages.
+            %%
+            %% The amqqueue_process could have taken care of this, but
+            %% between the time it receives the bump_credit msg and
+            %% calls BQ:resume to keep paging messages to disk, some
+            %% other request may arrive to the BQ which at this moment
+            %% is not in a proper state for a lazy BQ (unless all
+            %% messages have been paged to disk already).
+            wait_for_msg_store_credit(),
+            convert_to_lazy(State1)
+    end.
+
+%% Block (selective receive) until credit_flow grants more msg_store
+%% credit; returns immediately if we are not blocked.
+wait_for_msg_store_credit() ->
+    case credit_flow:blocked() of
+        true  -> receive
+                     {bump_credit, Msg} ->
+                         credit_flow:handle_bump_msg(Msg)
+                 end;
+        false -> ok
+    end.
+
+%% Get the Timestamp property of the first msg, if present. This is
+%% the one with the oldest timestamp among the heads of the pending
+%% acks and unread queues.  We can't check disk_pending_acks as these
+%% are paged out - we assume some will soon be paged in rather than
+%% forcing it to happen.  Pending ack msgs are included as they are
+%% regarded as unprocessed until acked, this also prevents the result
+%% apparently oscillating during repeated rejects.  Q3 is only checked
+%% when Q4 is empty as any Q4 msg will be earlier.
+%% See the comment block above: pick the oldest Timestamp property
+%% among the heads of q4/q3 and the two in-RAM pending-ack trees.
+%% Returns '' (not 'undefined') when no head message has a timestamp.
+head_message_timestamp(Q3, Q4, RPA, QPA) ->
+    HeadMsgs = [ HeadMsgStatus#msg_status.msg ||
+                   HeadMsgStatus <-
+                       [ get_qs_head([Q4, Q3]),
+                         get_pa_head(RPA),
+                         get_pa_head(QPA) ],
+                   HeadMsgStatus /= undefined,
+                   HeadMsgStatus#msg_status.msg /= undefined ],
+
+    Timestamps =
+        [Timestamp || HeadMsg <- HeadMsgs,
+                      Timestamp <- [rabbit_basic:extract_timestamp(
+                                      HeadMsg#basic_message.content)],
+                      Timestamp /= undefined
+        ],
+
+    case Timestamps == [] of
+        true -> '';
+        false -> lists:min(Timestamps)
+    end.
+
+%% Head of the first non-empty queue in Qs, or 'undefined'. Uses
+%% throw/catch purely as an early exit from the fold.
+get_qs_head(Qs) ->
+    catch lists:foldl(
+            fun (Q, Acc) ->
+                    case get_q_head(Q) of
+                        undefined -> Acc;
+                        Val -> throw(Val)
+                    end
+            end, undefined, Qs).
+
+%% Head msg_status of a ?QUEUE, or 'undefined' when empty.
+get_q_head(Q) ->
+    get_collection_head(Q, fun ?QUEUE:is_empty/1, fun ?QUEUE:peek/1).
+
+%% Smallest-key msg_status of a pending-ack gb_tree, or 'undefined'.
+get_pa_head(PA) ->
+    get_collection_head(PA, fun gb_trees:is_empty/1, fun gb_trees:smallest/1).
+
+%% Generic "peek or undefined": both ?QUEUE:peek/1 and
+%% gb_trees:smallest/1 yield a {_, MsgStatus} shape here.
+get_collection_head(Col, IsEmpty, GetVal) ->
+    case IsEmpty(Col) of
+        false ->
+            {_, MsgStatus} = GetVal(Col),
+            MsgStatus;
+        true  -> undefined
+    end.
+
+%%----------------------------------------------------------------------------
+%% Minor helpers
+%%----------------------------------------------------------------------------
+%% a/1 ("assert"): cheap internal-consistency checks on the queue
+%% state, run on every state returned to callers. Each 'true = Expr'
+%% crashes with badmatch if an invariant is violated; the unmodified
+%% State is returned on success. One clause per queue mode.
+a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+                     mode             = default,
+                     len              = Len,
+                     bytes            = Bytes,
+                     unacked_bytes    = UnackedBytes,
+                     persistent_count = PersistentCount,
+                     persistent_bytes = PersistentBytes,
+                     ram_msg_count    = RamMsgCount,
+                     ram_bytes        = RamBytes}) ->
+    E1 = ?QUEUE:is_empty(Q1),
+    E2 = ?QUEUE:is_empty(Q2),
+    ED = Delta#delta.count == 0,
+    E3 = ?QUEUE:is_empty(Q3),
+    E4 = ?QUEUE:is_empty(Q4),
+    LZ = Len == 0,
+
+    %% if q1 has messages then q3 cannot be empty. See publish/6.
+    true = E1 or not E3,
+    %% if q2 has messages then we have messages in delta (paged to
+    %% disk). See push_alphas_to_betas/2.
+    true = E2 or not ED,
+    %% if delta has messages then q3 cannot be empty. This is enforced
+    %% by paging, where min([?SEGMENT_ENTRY_COUNT, len(q3)]) messages
+    %% are always kept on RAM.
+    true = ED or not E3,
+    %% if the queue length is 0, then q3 and q4 must be empty.
+    true = LZ == (E3 and E4),
+
+    %% All counters are non-negative and RAM figures never exceed
+    %% their totals.
+    true = Len             >= 0,
+    true = Bytes           >= 0,
+    true = UnackedBytes    >= 0,
+    true = PersistentCount >= 0,
+    true = PersistentBytes >= 0,
+    true = RamMsgCount     >= 0,
+    true = RamMsgCount     =< Len,
+    true = RamBytes        >= 0,
+    true = RamBytes        =< Bytes + UnackedBytes,
+
+    State;
+a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+                     mode             = lazy,
+                     len              = Len,
+                     bytes            = Bytes,
+                     unacked_bytes    = UnackedBytes,
+                     persistent_count = PersistentCount,
+                     persistent_bytes = PersistentBytes,
+                     ram_msg_count    = RamMsgCount,
+                     ram_bytes        = RamBytes}) ->
+    E1 = ?QUEUE:is_empty(Q1),
+    E2 = ?QUEUE:is_empty(Q2),
+    ED = Delta#delta.count == 0,
+    E3 = ?QUEUE:is_empty(Q3),
+    E4 = ?QUEUE:is_empty(Q4),
+    LZ = Len == 0,
+    L3 = ?QUEUE:len(Q3),
+
+    %% q1 must always be empty, since q1 only gets messages during
+    %% publish, but for lazy queues messages go straight to delta.
+    true = E1,
+
+    %% q2 only gets messages from q1 when push_alphas_to_betas is
+    %% called for a non empty delta, which won't be the case for a
+    %% lazy queue. This means q2 must always be empty.
+    true = E2,
+
+    %% q4 must always be empty, since q1 only gets messages during
+    %% publish, but for lazy queues messages go straight to delta.
+    true = E4,
+
+    %% if the queue is empty, then delta is empty and q3 is empty.
+    true = LZ == (ED and E3),
+
+    %% There should be no messages in q1, q2, and q4
+    true = Delta#delta.count + L3 == Len,
+
+    true = Len             >= 0,
+    true = Bytes           >= 0,
+    true = UnackedBytes    >= 0,
+    true = PersistentCount >= 0,
+    true = PersistentBytes >= 0,
+    true = RamMsgCount     >= 0,
+    true = RamMsgCount     =< Len,
+    true = RamBytes        >= 0,
+    true = RamBytes        =< Bytes + UnackedBytes,
+
+    State.
+
+%% Assert a #delta{} is well-formed: its seq-id range can hold at
+%% least 'count' messages. Fails with function_clause otherwise.
+d(Delta = #delta { start_seq_id = Start, count = Count, end_seq_id = End })
+  when Start + Count =< End ->
+    Delta.
+
+%% Assert #msg_status{} invariants: a persistent message must have its
+%% index entry on disk, and the message body must exist somewhere
+%% (in RAM or in the message store).
+m(MsgStatus = #msg_status { is_persistent = IsPersistent,
+                            msg_in_store  = MsgInStore,
+                            index_on_disk = IndexOnDisk }) ->
+    true = (not IsPersistent) or IndexOnDisk,
+    true = msg_in_ram(MsgStatus) or MsgInStore,
+    MsgStatus.
+
+%% Boolean -> 0/1, handy for counter arithmetic.
+one_if(true ) -> 1;
+one_if(false) -> 0.
+
+%% Conditionally cons E onto L.
+cons_if(true,   E, L) -> [E | L];
+cons_if(false, _E, L) -> L.
+
+%% Conditionally add Val to a gb_set.
+gb_sets_maybe_insert(false, _Val, Set) -> Set;
+gb_sets_maybe_insert(true,   Val, Set) -> gb_sets:add(Val, Set).
+
+%% Build the #msg_status{} for a freshly published message: body in
+%% RAM, nothing on disk yet. determine_persist_to/3 decides (based on
+%% message size vs IndexMaxSize) whether this message will be embedded
+%% in the queue index or written to the message store.
+msg_status(IsPersistent, IsDelivered, SeqId,
+           Msg = #basic_message {id = MsgId}, MsgProps, IndexMaxSize) ->
+    #msg_status{seq_id        = SeqId,
+                msg_id        = MsgId,
+                msg           = Msg,
+                is_persistent = IsPersistent,
+                is_delivered  = IsDelivered,
+                msg_in_store  = false,
+                index_on_disk = false,
+                persist_to    = determine_persist_to(Msg, MsgProps, IndexMaxSize),
+                msg_props     = MsgProps}.
+
+%% Build a #msg_status{} from a queue-index entry. First clause: the
+%% whole message was embedded in the index (body available, nothing in
+%% the msg store). Second clause: only the msg id was indexed, so the
+%% body lives in the message store.
+beta_msg_status({Msg = #basic_message{id = MsgId},
+                 SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+    MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered),
+    MS0#msg_status{msg_id       = MsgId,
+                   msg          = Msg,
+                   persist_to   = queue_index,
+                   msg_in_store = false};
+
+beta_msg_status({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+    MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered),
+    MS0#msg_status{msg_id       = MsgId,
+                   msg          = undefined,
+                   persist_to   = msg_store,
+                   msg_in_store = true}.
+
+%% Common fields for both beta_msg_status/1 clauses; the index entry
+%% is on disk by construction.
+beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered) ->
+  #msg_status{seq_id        = SeqId,
+              msg           = undefined,
+              is_persistent = IsPersistent,
+              is_delivered  = IsDelivered,
+              index_on_disk = true,
+              msg_props     = MsgProps}.
+
+%% Drop the in-RAM body for store-resident messages (it can be re-read
+%% from the msg store); index-embedded messages keep their body here.
+trim_msg_status(MsgStatus) ->
+    case persist_to(MsgStatus) of
+        msg_store   -> MsgStatus#msg_status{msg = undefined};
+        queue_index -> MsgStatus
+    end.
+
+%% The msg_store_clients field is a {Persistent, Transient} pair of
+%% store clients; run Fun against the one selected by IsPersistent and
+%% thread the updated client back into the pair.
+with_msg_store_state({MSCStateP, MSCStateT},  true, Fun) ->
+    {Result, MSCStateP1} = Fun(MSCStateP),
+    {Result, {MSCStateP1, MSCStateT}};
+with_msg_store_state({MSCStateP, MSCStateT}, false, Fun) ->
+    {Result, MSCStateT1} = Fun(MSCStateT),
+    {Result, {MSCStateP, MSCStateT1}}.
+
+%% As above but for operations that don't change the client state:
+%% only Fun's result is returned.
+with_immutable_msg_store_state(MSCState, IsPersistent, Fun) ->
+    {Res, MSCState} = with_msg_store_state(MSCState, IsPersistent,
+                                           fun (MSCState1) ->
+                                                   {Fun(MSCState1), MSCState1}
+                                           end),
+    Res.
+
+%% Create a msg_store client with a fresh unique reference.
+msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) ->
+    msg_store_client_init(MsgStore, rabbit_guid:gen(), MsgOnDiskFun,
+                          Callback).
+
+%% Create a msg_store client with an explicit reference (used on
+%% recovery). The close-fds callback is wrapped so the store can ask
+%% this module (via invoke/3) to close spare file descriptors.
+msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) ->
+    CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE),
+    rabbit_msg_store:client_init(MsgStore, Ref, MsgOnDiskFun,
+                                 fun () -> Callback(?MODULE, CloseFDsFun) end).
+
+%% Write a message body to the appropriate store (flow-controlled).
+msg_store_write(MSCState, IsPersistent, MsgId, Msg) ->
+    with_immutable_msg_store_state(
+      MSCState, IsPersistent,
+      fun (MSCState1) ->
+              rabbit_msg_store:write_flow(MsgId, Msg, MSCState1)
+      end).
+
+%% Read a message body back from the appropriate store; may update the
+%% client state (e.g. its read cache), hence the mutable wrapper.
+msg_store_read(MSCState, IsPersistent, MsgId) ->
+    with_msg_store_state(
+      MSCState, IsPersistent,
+      fun (MSCState1) ->
+              rabbit_msg_store:read(MsgId, MSCState1)
+      end).
+
+%% Remove messages from the appropriate store.
+%% NOTE(review): the inner variable is spelled MCSState1, inconsistent
+%% with MSCState1 everywhere else (harmless, but worth fixing upstream).
+msg_store_remove(MSCState, IsPersistent, MsgIds) ->
+    with_immutable_msg_store_state(
+      MSCState, IsPersistent,
+      fun (MCSState1) ->
+              rabbit_msg_store:remove(MsgIds, MCSState1)
+      end).
+
+%% Ask the selected store client to close file descriptors it was
+%% told to release.
+msg_store_close_fds(MSCState, IsPersistent) ->
+    with_msg_store_state(
+      MSCState, IsPersistent,
+      fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end).
+
+%% Build a callback (shaped for invoke/3: it matches ?MODULE) that
+%% closes indicated fds on whichever store IsPersistent selects.
+msg_store_close_fds_fun(IsPersistent) ->
+    fun (?MODULE, State = #vqstate { msg_store_clients = MSCState }) ->
+            {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent),
+            State #vqstate { msg_store_clients = MSCState1 }
+    end.
+
+%% Record a 'delivered' marker in the queue index, but only when the
+%% first argument says the message was actually delivered.
+maybe_write_delivered(false, _SeqId, IndexState) ->
+    IndexState;
+maybe_write_delivered(true, SeqId, IndexState) ->
+    rabbit_queue_index:deliver([SeqId], IndexState).
+
+%% Turn a list of queue-index entries into a ?QUEUE of beta
+%% msg_statuses. Transient entries older than TransientThreshold are
+%% not kept: they are scheduled for deliver+ack via DelsAndAcksFun.
+%% Entries already sitting in a pending-ack tree are skipped too.
+%% Also tallies how many kept entries have their body in RAM (count
+%% and bytes) so the caller can adjust the RAM accounting.
+betas_from_index_entries(List, TransientThreshold, DelsAndAcksFun, State) ->
+    {Filtered, Delivers, Acks, RamReadyCount, RamBytes} =
+        lists:foldr(
+          fun ({_MsgOrId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M,
+               {Filtered1, Delivers1, Acks1, RRC, RB} = Acc) ->
+                  case SeqId < TransientThreshold andalso not IsPersistent of
+                      true  -> {Filtered1,
+                                cons_if(not IsDelivered, SeqId, Delivers1),
+                                [SeqId | Acks1], RRC, RB};
+                      false -> MsgStatus = m(beta_msg_status(M)),
+                               HaveMsg = msg_in_ram(MsgStatus),
+                               Size = msg_size(MsgStatus),
+                               case is_msg_in_pending_acks(SeqId, State) of
+                                   false -> {?QUEUE:in_r(MsgStatus, Filtered1),
+                                             Delivers1, Acks1,
+                                             RRC + one_if(HaveMsg),
+                                             RB + one_if(HaveMsg) * Size};
+                                   true  -> Acc %% [0]
+                               end
+                  end
+          end, {?QUEUE:new(), [], [], 0, 0}, List),
+    {Filtered, RamReadyCount, RamBytes, DelsAndAcksFun(Delivers, Acks, State)}.
+%% [0] We don't increase RamBytes here, even though it pertains to
+%% unacked messages too, since if HaveMsg then the message must have
+%% been stored in the QI, thus the message must have been in
+%% qi_pending_ack, thus it must already have been in RAM.
+
+%% True iff SeqId is tracked in any of the three pending-ack trees
+%% (RAM, disk, or index-embedded).
+is_msg_in_pending_acks(SeqId, #vqstate { ram_pending_ack  = RPA,
+                                         disk_pending_ack = DPA,
+                                         qi_pending_ack   = QPA }) ->
+    (gb_trees:is_defined(SeqId, RPA) orelse
+     gb_trees:is_defined(SeqId, DPA) orelse
+     gb_trees:is_defined(SeqId, QPA)).
+
+%% Grow a delta to cover SeqId. Four cases: blank delta (start a new
+%% one-message range), SeqId before the current start, SeqId at/after
+%% the current end, or SeqId inside the range (only count grows). Each
+%% result is validated with d/1.
+expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X)) ->
+    d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1 });
+expand_delta(SeqId, #delta { start_seq_id = StartSeqId,
+                             count        = Count } = Delta)
+  when SeqId < StartSeqId ->
+    d(Delta #delta { start_seq_id = SeqId, count = Count + 1 });
+expand_delta(SeqId, #delta { count        = Count,
+                             end_seq_id   = EndSeqId } = Delta)
+  when SeqId >= EndSeqId ->
+    d(Delta #delta { count = Count + 1, end_seq_id = SeqId + 1 });
+expand_delta(_SeqId, #delta { count       = Count } = Delta) ->
+    d(Delta #delta { count = Count + 1 }).
+
+%%----------------------------------------------------------------------------
+%% Internal major helpers for Public API
+%%----------------------------------------------------------------------------
+
+%% Build the initial #vqstate{} for a (possibly recovered) queue. On a
+%% clean shutdown the persisted counts come from the Terms proplist;
+%% after a dirty shutdown ('non_clean_shutdown') the counts passed in
+%% by the caller are trusted instead. All recovered messages start out
+%% in delta (on disk); maybe_deltas_to_betas/1 then pages some back
+%% into RAM.
+init(IsDurable, IndexState, DeltaCount, DeltaBytes, Terms,
+     PersistentClient, TransientClient) ->
+    {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState),
+
+    {DeltaCount1, DeltaBytes1} =
+        case Terms of
+            non_clean_shutdown -> {DeltaCount, DeltaBytes};
+            _                  -> {proplists:get_value(persistent_count,
+                                                       Terms, DeltaCount),
+                                   proplists:get_value(persistent_bytes,
+                                                       Terms, DeltaBytes)}
+        end,
+    Delta = case DeltaCount1 == 0 andalso DeltaCount /= undefined of
+                true  -> ?BLANK_DELTA;
+                false -> d(#delta { start_seq_id = LowSeqId,
+                                    count        = DeltaCount1,
+                                    end_seq_id   = NextSeqId })
+            end,
+    Now = time_compat:monotonic_time(),
+    IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size,
+                                      ?IO_BATCH_SIZE),
+
+    %% Size threshold below which message bodies are embedded directly
+    %% in the queue index rather than the message store.
+    {ok, IndexMaxSize} = application:get_env(
+                           rabbit, queue_index_embed_msgs_below),
+    State = #vqstate {
+      q1                  = ?QUEUE:new(),
+      q2                  = ?QUEUE:new(),
+      delta               = Delta,
+      q3                  = ?QUEUE:new(),
+      q4                  = ?QUEUE:new(),
+      next_seq_id         = NextSeqId,
+      ram_pending_ack     = gb_trees:empty(),
+      disk_pending_ack    = gb_trees:empty(),
+      qi_pending_ack      = gb_trees:empty(),
+      index_state         = IndexState1,
+      msg_store_clients   = {PersistentClient, TransientClient},
+      durable             = IsDurable,
+      transient_threshold = NextSeqId,
+      qi_embed_msgs_below = IndexMaxSize,
+
+      len                 = DeltaCount1,
+      persistent_count    = DeltaCount1,
+      bytes               = DeltaBytes1,
+      persistent_bytes    = DeltaBytes1,
+
+      target_ram_count    = infinity,
+      ram_msg_count       = 0,
+      ram_msg_count_prev  = 0,
+      ram_ack_count_prev  = 0,
+      ram_bytes           = 0,
+      unacked_bytes       = 0,
+      out_counter         = 0,
+      in_counter          = 0,
+      rates               = blank_rates(Now),
+      msgs_on_disk        = gb_sets:new(),
+      msg_indices_on_disk = gb_sets:new(),
+      unconfirmed         = gb_sets:new(),
+      confirmed           = gb_sets:new(),
+      ack_out_counter     = 0,
+      ack_in_counter      = 0,
+      disk_read_count     = 0,
+      disk_write_count    = 0,
+
+      io_batch_size       = IoBatchSize,
+
+      mode                = default },
+    a(maybe_deltas_to_betas(State)).
+
+%% Zeroed rate record stamped with the given (monotonic) time.
+blank_rates(Now) ->
+    #rates { in        = 0.0,
+             out       = 0.0,
+             ack_in    = 0.0,
+             ack_out   = 0.0,
+             timestamp = Now}.
+
+%% Push a message back onto the head of the in-memory queues after it
+%% has been popped (see collect_by_predicate/3, which calls this when
+%% the predicate fails for the message it just took).
+%%
+%% Default mode, body not in RAM (msg = undefined): if q4 is empty the
+%% status goes to the head of q3; otherwise q4 holds messages whose
+%% bodies are present, so we read the body back from the message store
+%% first and account (via the ready0 signs, i.e. no count change) for
+%% it re-entering RAM.
+in_r(MsgStatus = #msg_status { msg = undefined },
+     State = #vqstate { mode = default, q3 = Q3, q4 = Q4 }) ->
+    case ?QUEUE:is_empty(Q4) of
+        true  -> State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) };
+        false -> {Msg, State1 = #vqstate { q4 = Q4a }} =
+                     read_msg(MsgStatus, State),
+                 MsgStatus1 = MsgStatus#msg_status{msg = Msg},
+                 stats(ready0, {MsgStatus, MsgStatus1},
+                       State1 #vqstate { q4 = ?QUEUE:in_r(MsgStatus1, Q4a) })
+    end;
+%% Default mode, body already in RAM: straight onto the head of q4.
+in_r(MsgStatus,
+     State = #vqstate { mode = default, q4 = Q4 }) ->
+    State #vqstate { q4 = ?QUEUE:in_r(MsgStatus, Q4) };
+%% lazy queues
+in_r(MsgStatus = #msg_status { seq_id = SeqId },
+     State = #vqstate { mode = lazy, q3 = Q3, delta = Delta}) ->
+    case ?QUEUE:is_empty(Q3) of
+        true  ->
+            %% q3 empty: force the message (body and index entry) to
+            %% disk and fold its seq id back into the delta segment.
+            {_MsgStatus1, State1} =
+                maybe_write_to_disk(true, true, MsgStatus, State),
+            State2 = stats(ready0, {MsgStatus, none}, State1),
+            Delta1 = expand_delta(SeqId, Delta),
+            State2 #vqstate{ delta = Delta1 };
+        false ->
+            State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) }
+    end.
+
+%% Pop the next ready message, if any, as
+%% {{value, MsgStatus}, State} | {empty, State}. Default mode tries q4
+%% first and then falls back to q3; lazy mode always goes via q3.
+queue_out(State = #vqstate { mode = default, q4 = Q4 }) ->
+    case ?QUEUE:out(Q4) of
+        {{value, MsgStatus}, Q4a} ->
+            {{value, MsgStatus}, State #vqstate { q4 = Q4a }};
+        {empty, _Q4a} ->
+            queue_out_from_q3(State)
+    end;
+%% lazy queues
+queue_out(State = #vqstate { mode = lazy }) ->
+    queue_out_from_q3(State).
+
+%% Shared tail for both modes: translate fetch_from_q3/1's result into
+%% the {empty, _} | {{value, _}, _} shape queue_out/1 promises.
+queue_out_from_q3(State) ->
+    case fetch_from_q3(State) of
+        {empty, _State1} = Result     -> Result;
+        {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1}
+    end.
+
+%% Return the message body for MsgStatus, fetching it from the
+%% message store when it is not held in RAM.
+read_msg(#msg_status{msg           = undefined,
+                     msg_id        = MsgId,
+                     is_persistent = IsPersistent}, State) ->
+    read_msg(MsgId, IsPersistent, State);
+read_msg(#msg_status{msg = Msg}, State) ->
+    {Msg, State}.
+
+%% Fetch a message body from the persistent or transient message
+%% store, bumping disk_read_count.
+read_msg(MsgId, IsPersistent, State = #vqstate{msg_store_clients = MSCState,
+                                               disk_read_count   = ReadCount}) ->
+    {{ok, Msg = #basic_message {}}, MSCState1} =
+        msg_store_read(MSCState, IsPersistent, MsgId),
+    State1 = State #vqstate {msg_store_clients = MSCState1,
+                             disk_read_count   = ReadCount + 1},
+    {Msg, State1}.
+
+%% Adjust the per-queue counters (ready/unacked counts, byte totals,
+%% RAM and persistence accounting) after a message changes state.
+%% Signs names the count adjustment, Statuses the before/after
+%% #msg_status{} pair (or none/lazy placeholders) from which the
+%% RAM-residency change is derived.
+stats(Signs, Statuses, State) ->
+    stats0(expand_signs(Signs), expand_statuses(Statuses), State).
+
+%% -> {DeltaReady, DeltaUnacked, ReadyMsgPaged}
+expand_signs(ready0)        -> {0, 0, true};
+expand_signs(lazy_pub)      -> {1, 0, true};
+expand_signs({A, B})        -> {A, B, false}.
+
+%% -> {InRamBefore, InRamAfter, MsgStatus}
+expand_statuses({none, A})    -> {false,         msg_in_ram(A), A};
+expand_statuses({B,    none}) -> {msg_in_ram(B), false,         B};
+expand_statuses({lazy, A})    -> {false        , false,         A};
+expand_statuses({B,    A})    -> {msg_in_ram(B), msg_in_ram(A), B}.
+
+%% In this function at least, we are religious: the variable name
+%% contains "Ready" or "Unacked" iff that is what it counts. If
+%% neither is present it counts both.
+stats0({DeltaReady, DeltaUnacked, ReadyMsgPaged},
+       {InRamBefore, InRamAfter, MsgStatus},
+       State = #vqstate{len              = ReadyCount,
+                        bytes            = ReadyBytes,
+                        ram_msg_count    = RamReadyCount,
+                        persistent_count = PersistentCount,
+                        unacked_bytes    = UnackedBytes,
+                        ram_bytes        = RamBytes,
+                        persistent_bytes = PersistentBytes}) ->
+    S = msg_size(MsgStatus),
+    DeltaTotal = DeltaReady + DeltaUnacked,
+    %% +1/-1/0 according to whether the message body entered/left RAM
+    DeltaRam = case {InRamBefore, InRamAfter} of
+                   {false, false} ->  0;
+                   {false, true}  ->  1;
+                   {true,  false} -> -1;
+                   {true,  true}  ->  0
+               end,
+    %% ram_msg_count counts only *ready* messages held in RAM: when
+    %% the ready length changes, RAM residency of the after (resp.
+    %% before) status decides; when it does not, only a paged ready
+    %% message (ReadyMsgPaged) may move the counter.
+    DeltaRamReady = case DeltaReady of
+                        1                    -> one_if(InRamAfter);
+                        -1                   -> -one_if(InRamBefore);
+                        0 when ReadyMsgPaged -> DeltaRam;
+                        0                    -> 0
+                    end,
+    DeltaPersistent = DeltaTotal * one_if(MsgStatus#msg_status.is_persistent),
+    State#vqstate{len               = ReadyCount      + DeltaReady,
+                  ram_msg_count     = RamReadyCount   + DeltaRamReady,
+                  persistent_count  = PersistentCount + DeltaPersistent,
+                  bytes             = ReadyBytes      + DeltaReady       * S,
+                  unacked_bytes     = UnackedBytes    + DeltaUnacked     * S,
+                  ram_bytes         = RamBytes        + DeltaRam         * S,
+                  persistent_bytes  = PersistentBytes + DeltaPersistent  * S}.
+
+%% Body size in bytes, as recorded in the message properties.
+msg_size(#msg_status{msg_props = Props}) ->
+    Props#message_properties.size.
+
+%% true iff the message body is currently held in RAM.
+msg_in_ram(#msg_status{msg = Msg}) ->
+    case Msg of
+        undefined -> false;
+        _Body     -> true
+    end.
+
+%% Remove a single message that has just been taken off the queue.
+%% First param: AckRequired.
+%%
+%% AckRequired = true: the message moves to the pending-ack trees
+%% (ready count -1, unacked count +1) and its seq id is returned so
+%% the caller can hand out an ack tag.
+remove(true, MsgStatus = #msg_status {
+               seq_id        = SeqId,
+               is_delivered  = IsDelivered,
+               index_on_disk = IndexOnDisk },
+       State = #vqstate {out_counter       = OutCount,
+                         index_state       = IndexState}) ->
+    %% Mark it delivered if necessary
+    IndexState1 = maybe_write_delivered(
+                    IndexOnDisk andalso not IsDelivered,
+                    SeqId, IndexState),
+
+    State1 = record_pending_ack(
+               MsgStatus #msg_status {
+                 is_delivered = true }, State),
+
+    %% one fewer ready message, one more unacked
+    State2 = stats({-1, 1}, {MsgStatus, MsgStatus}, State1),
+
+    {SeqId, maybe_update_rates(
+              State2 #vqstate {out_counter = OutCount + 1,
+                               index_state = IndexState1})};
+
+%% This function body has the same behaviour as remove_queue_entries/3
+%% but instead of removing messages based on a ?QUEUE, this removes
+%% just one message, the one referenced by the MsgStatus provided.
+remove(false, MsgStatus = #msg_status {
+                seq_id        = SeqId,
+                msg_id        = MsgId,
+                is_persistent = IsPersistent,
+                is_delivered  = IsDelivered,
+                msg_in_store  = MsgInStore,
+                index_on_disk = IndexOnDisk },
+       State = #vqstate {out_counter       = OutCount,
+                         index_state       = IndexState,
+                         msg_store_clients = MSCState}) ->
+    %% Mark it delivered if necessary
+    IndexState1 = maybe_write_delivered(
+                    IndexOnDisk andalso not IsDelivered,
+                    SeqId, IndexState),
+
+    %% Remove from msg_store and queue index, if necessary
+    case MsgInStore of
+        true  -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
+        false -> ok
+    end,
+
+    IndexState2 =
+        case IndexOnDisk of
+            true  -> rabbit_queue_index:ack([SeqId], IndexState1);
+            false -> IndexState1
+        end,
+
+    %% one fewer ready message; nothing becomes unacked
+    State1 = stats({-1, 0}, {MsgStatus, none}, State),
+
+    {undefined, maybe_update_rates(
+                  State1 #vqstate {out_counter = OutCount + 1,
+                                   index_state = IndexState2})}.
+
+%% This function exists as a way to improve dropwhile/2
+%% performance. The idea of having this function is to optimise calls
+%% to rabbit_queue_index by batching delivers and acks, instead of
+%% sending them one by one.
+%%
+%% Instead of removing every message as they are popped from the
+%% queue, it first accumulates them and then removes them by calling
+%% remove_queue_entries/3, since the behaviour of
+%% remove_queue_entries/3 when used with
+%% process_delivers_and_acks_fun(deliver_and_ack) is the same as
+%% calling remove(false, MsgStatus, State).
+%%
+%% remove/3 also updates the out_counter in every call, but here we do
+%% it just once at the end.
+%%
+%% Returns {MsgProps | undefined, State}: the properties of the first
+%% message that failed Pred, or undefined if the queue was exhausted.
+remove_by_predicate(Pred, State = #vqstate {out_counter = OutCount}) ->
+    {MsgProps, QAcc, State1} =
+        collect_by_predicate(Pred, ?QUEUE:new(), State),
+    State2 =
+        remove_queue_entries(
+          QAcc, process_delivers_and_acks_fun(deliver_and_ack), State1),
+    %% maybe_update_rates/1 is called in remove/3 for every
+    %% message. Since we update out_counter only once, we call it just
+    %% once here.
+    {MsgProps, maybe_update_rates(
+                 State2 #vqstate {
+                   out_counter = OutCount + ?QUEUE:len(QAcc)})}.
+
+%% This function exists as a way to improve fetchwhile/4
+%% performance. The idea of having this function is to optimise calls
+%% to rabbit_queue_index by batching delivers, instead of sending them
+%% one by one.
+%%
+%% Fun is the function passed to fetchwhile/4 that's
+%% applied to every fetched message and used to build the fetchwhile/4
+%% result accumulator FetchAcc.
+%%
+%% Returns {MsgProps | undefined, FetchAcc1, State}; MsgProps belongs
+%% to the first message that failed Pred (undefined if the queue ran
+%% out first).
+fetch_by_predicate(Pred, Fun, FetchAcc,
+                   State = #vqstate {
+                              index_state = IndexState,
+                              out_counter = OutCount}) ->
+    {MsgProps, QAcc, State1} =
+        collect_by_predicate(Pred, ?QUEUE:new(), State),
+
+    {Delivers, FetchAcc1, State2} =
+        process_queue_entries(QAcc, Fun, FetchAcc, State1),
+
+    %% single batched 'deliver' for everything we just fetched
+    IndexState1 = rabbit_queue_index:deliver(Delivers, IndexState),
+
+    {MsgProps, FetchAcc1, maybe_update_rates(
+                            State2 #vqstate {
+                              index_state = IndexState1,
+                              out_counter = OutCount + ?QUEUE:len(QAcc)})}.
+
+%% We try to do here the same as what remove(true, MsgStatus, State)
+%% does but processing several messages at the same time. The idea is
+%% to optimize rabbit_queue_index:deliver/2 calls by sending a list of
+%% SeqIds instead of one by one, thus process_queue_entries1 will
+%% accumulate the required deliveries, will record_pending_ack for
+%% each message, and will update stats, like remove/3 does.
+%%
+%% For the meaning of Fun and FetchAcc arguments see
+%% fetch_by_predicate/4 above.
+process_queue_entries(Q, Fun, FetchAcc, State) ->
+    ?QUEUE:foldl(fun (MsgStatus, Acc) ->
+                         process_queue_entries1(MsgStatus, Fun, Acc)
+                 end,
+                 {[], FetchAcc, State}, Q).
+
+%% Process one fetched message: read its body, move it to the
+%% pending-ack state, accumulate its seq id for the batched index
+%% 'deliver' (only when it is on disk and not yet marked delivered),
+%% and thread the message through the caller-supplied Fun/FetchAcc.
+process_queue_entries1(
+  #msg_status { seq_id = SeqId, is_delivered = IsDelivered,
+                index_on_disk = IndexOnDisk} = MsgStatus,
+  Fun,
+  {Delivers, FetchAcc, State}) ->
+    {Msg, State1} = read_msg(MsgStatus, State),
+    State2 = record_pending_ack(
+               MsgStatus #msg_status {
+                 is_delivered = true }, State1),
+    %% -1 ready / +1 unacked, exactly as in remove(true, ...)
+    {cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
+     Fun(Msg, SeqId, FetchAcc),
+     stats({-1, 1}, {MsgStatus, MsgStatus}, State2)}.
+
+%% Pop messages while Pred(MsgProps) holds, accumulating them in Acc.
+%% Stops at the first failing message, which is pushed back onto the
+%% queue head with in_r/2; returns {MsgProps | undefined, Acc, State}.
+collect_by_predicate(Pred, Acc, State) ->
+    case queue_out(State) of
+        {empty, State1} ->
+            {undefined, Acc, State1};
+        {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
+            case Pred(MsgProps) of
+                true ->
+                    Acc1 = ?QUEUE:in(MsgStatus, Acc),
+                    collect_by_predicate(Pred, Acc1, State1);
+                false ->
+                    {MsgProps, Acc, in_r(MsgStatus, State1)}
+            end
+    end.
+
+%%----------------------------------------------------------------------------
+%% Helpers for Public API purge/1 function
+%%----------------------------------------------------------------------------
+
+%% The difference between purge_when_pending_acks/1
+%% vs. purge_and_index_reset/1 is that the first one issues a deliver
+%% and an ack to the queue index for every message that's being
+%% removed, while the latter just resets the queue index state.
+purge_when_pending_acks(State) ->
+    a(purge1(process_delivers_and_acks_fun(deliver_and_ack), State)).
+
+purge_and_index_reset(State) ->
+    a(reset_qi_state(purge1(process_delivers_and_acks_fun(none), State))).
+
+%% This function removes messages from each of {q1, q2, q3, q4}.
+%%
+%% With remove_queue_entries/3 q1 and q4 are emptied, while q2 and q3
+%% are specially handled by purge_betas_and_deltas/2.
+%%
+%% purge_betas_and_deltas/2 loads messages from the queue index,
+%% filling up q3 and in some cases moving messages from q2 to q3 while
+%% resetting q2 to an empty queue (see maybe_deltas_to_betas/2). The
+%% messages loaded into q3 are removed by calling
+%% remove_queue_entries/3 until there are no more messages to be read
+%% from the queue index. Messages are read in batches from the queue
+%% index.
+purge1(AfterFun, State = #vqstate { q4 = Q4}) ->
+    State1 = remove_queue_entries(Q4, AfterFun, State),
+
+    State2 = #vqstate {q1 = Q1} =
+        purge_betas_and_deltas(AfterFun, State1#vqstate{q4 = ?QUEUE:new()}),
+
+    State3 = remove_queue_entries(Q1, AfterFun, State2),
+
+    a(State3#vqstate{q1 = ?QUEUE:new()}).
+
+%% Replace the queue index state with a freshly reset one.
+reset_qi_state(State = #vqstate{index_state = IndexState}) ->
+    IndexState1 = rabbit_queue_index:reset_state(IndexState),
+    State#vqstate{index_state = IndexState1}.
+
+%% true iff no messages are awaiting acknowledgement.
+is_pending_ack_empty(State) ->
+    0 =:= count_pending_acks(State).
+
+%% Total pending acks across the RAM, disk and queue-index trees.
+count_pending_acks(#vqstate { ram_pending_ack   = RPA,
+                              disk_pending_ack  = DPA,
+                              qi_pending_ack    = QPA }) ->
+    lists:sum([gb_trees:size(T) || T <- [RPA, DPA, QPA]]).
+
+%% Drain q3 (and, transitively, the delta segment) by removing each
+%% loaded batch with remove_queue_entries/3 and then reloading more
+%% messages from the queue index via maybe_deltas_to_betas/2, until
+%% nothing remains.
+purge_betas_and_deltas(DelsAndAcksFun, State = #vqstate { mode = Mode }) ->
+    State0 = #vqstate { q3 = Q3 } =
+        case Mode of
+            %% lazy queues hold their backlog in delta, so prime q3
+            %% with a first batch before looking at it
+            lazy -> maybe_deltas_to_betas(DelsAndAcksFun, State);
+            _    -> State
+        end,
+
+    case ?QUEUE:is_empty(Q3) of
+        true  -> State0;
+        false -> State1 = remove_queue_entries(Q3, DelsAndAcksFun, State0),
+                 purge_betas_and_deltas(DelsAndAcksFun,
+                                        maybe_deltas_to_betas(
+                                          DelsAndAcksFun,
+                                          State1#vqstate{q3 = ?QUEUE:new()}))
+    end.
+
+%% Remove every entry of Q: fold up the per-store msg ids plus the
+%% queue-index deliver/ack seq id lists, delete the bodies from the
+%% message stores, then hand the deliver/ack lists to DelsAndAcksFun.
+remove_queue_entries(Q, DelsAndAcksFun,
+                     State = #vqstate{msg_store_clients = MSCState}) ->
+    Acc0 = {orddict:new(), [], [], State},
+    {MsgIdsByStore, Delivers, Acks, State1} =
+        ?QUEUE:foldl(fun remove_queue_entries1/2, Acc0, Q),
+    remove_msgs_by_id(MsgIdsByStore, MSCState),
+    DelsAndAcksFun(Delivers, Acks, State1).
+
+%% Fold step for remove_queue_entries/3. For one entry it accumulates:
+%% the msg id (keyed by persistence) when the body is in a message
+%% store; the seq id for an index 'deliver' when the index entry is on
+%% disk and not yet marked delivered; the seq id for an index 'ack'
+%% when the index entry is on disk; and adjusts the stats by one
+%% fewer ready message.
+remove_queue_entries1(
+  #msg_status { msg_id = MsgId, seq_id = SeqId, is_delivered = IsDelivered,
+                msg_in_store = MsgInStore, index_on_disk = IndexOnDisk,
+                is_persistent = IsPersistent} = MsgStatus,
+  {MsgIdsByStore, Delivers, Acks, State}) ->
+    {case MsgInStore of
+         true  -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore);
+         false -> MsgIdsByStore
+     end,
+     cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
+     cons_if(IndexOnDisk, SeqId, Acks),
+     stats({-1, 0}, {MsgStatus, none}, State)}.
+
+%% Build the fun handed to remove_queue_entries/3 et al.
+%% deliver_and_ack: flush the accumulated deliver and ack lists to the
+%% queue index (delivers first, then acks); anything else: no-op.
+process_delivers_and_acks_fun(deliver_and_ack) ->
+    fun (Delivers, Acks, State = #vqstate { index_state = IndexState }) ->
+            IndexState1 = rabbit_queue_index:deliver(Delivers, IndexState),
+            IndexState2 = rabbit_queue_index:ack(Acks, IndexState1),
+            State #vqstate { index_state = IndexState2 }
+    end;
+process_delivers_and_acks_fun(_) ->
+    fun (_Delivers, _Acks, State) -> State end.
+
+%%----------------------------------------------------------------------------
+%% Internal gubbins for publishing
+%%----------------------------------------------------------------------------
+
+%% Enqueue a freshly published message.
+%%
+%% Default mode: the message goes to q4 when q3 is empty, else to q1;
+%% PersistFun is called without force flags, so only messages whose
+%% persistence requires it are written out.
+publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+         MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+         IsDelivered, _ChPid, _Flow, PersistFun,
+         State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
+                            mode                = default,
+                            qi_embed_msgs_below = IndexMaxSize,
+                            next_seq_id         = SeqId,
+                            in_counter          = InCount,
+                            durable             = IsDurable,
+                            unconfirmed         = UC }) ->
+    %% only durable queues keep messages persistent
+    IsPersistent1 = IsDurable andalso IsPersistent,
+    MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
+    {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
+    State2 = case ?QUEUE:is_empty(Q3) of
+                 false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) };
+                 true  -> State1 #vqstate { q4 = ?QUEUE:in(m(MsgStatus1), Q4) }
+             end,
+    InCount1 = InCount + 1,
+    UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+    stats({1, 0}, {none, MsgStatus1},
+          State2#vqstate{ next_seq_id = SeqId + 1,
+                          in_counter  = InCount1,
+                          unconfirmed = UC1 });
+%% Lazy mode: force both body and index entry to disk (PersistFun is
+%% called with both force flags set) and track only the seq id, in
+%% delta.
+publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+             MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+             IsDelivered, _ChPid, _Flow, PersistFun,
+             State = #vqstate { mode                = lazy,
+                                qi_embed_msgs_below = IndexMaxSize,
+                                next_seq_id         = SeqId,
+                                in_counter          = InCount,
+                                durable             = IsDurable,
+                                unconfirmed         = UC,
+                                delta               = Delta }) ->
+    IsPersistent1 = IsDurable andalso IsPersistent,
+    MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
+    {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
+    Delta1 = expand_delta(SeqId, Delta),
+    UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+    stats(lazy_pub, {lazy, m(MsgStatus1)},
+          State1#vqstate{ delta       = Delta1,
+                          next_seq_id = SeqId + 1,
+                          in_counter  = InCount + 1,
+                          unconfirmed = UC1 }).
+
+%% Fold step for batch publishing: publish one message using the
+%% batching index write path, threading the channel pid and flow flag
+%% through unchanged.
+batch_publish1({Msg, MsgProps, IsDelivered}, {ChPid, Flow, State}) ->
+    State1 = publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
+                      fun maybe_prepare_write_to_disk/4, State),
+    {ChPid, Flow, State1}.
+
+%% Publish a message that is immediately considered delivered and
+%% pending acknowledgement: it never becomes "ready" but goes straight
+%% into the pending-ack trees, and its seq id is returned as the ack
+%% tag. The two clauses are identical except for the mode match and
+%% the PersistFun force flags (lazy forces body and index to disk).
+publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent,
+                                          id = MsgId },
+                   MsgProps = #message_properties {
+                                 needs_confirming = NeedsConfirming },
+                   _ChPid, _Flow, PersistFun,
+                   State = #vqstate { mode                = default,
+                                      qi_embed_msgs_below = IndexMaxSize,
+                                      next_seq_id         = SeqId,
+                                      out_counter         = OutCount,
+                                      in_counter          = InCount,
+                                      durable             = IsDurable,
+                                      unconfirmed         = UC }) ->
+    IsPersistent1 = IsDurable andalso IsPersistent,
+    MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
+    {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
+    State2 = record_pending_ack(m(MsgStatus1), State1),
+    UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+    %% ready count unchanged, unacked +1
+    State3 = stats({0, 1}, {none, MsgStatus1},
+                   State2 #vqstate { next_seq_id      = SeqId    + 1,
+                                     out_counter      = OutCount + 1,
+                                     in_counter       = InCount  + 1,
+                                     unconfirmed      = UC1 }),
+    {SeqId, State3};
+publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent,
+                                          id = MsgId },
+                   MsgProps = #message_properties {
+                                 needs_confirming = NeedsConfirming },
+                   _ChPid, _Flow, PersistFun,
+                   State = #vqstate { mode                = lazy,
+                                      qi_embed_msgs_below = IndexMaxSize,
+                                      next_seq_id         = SeqId,
+                                      out_counter         = OutCount,
+                                      in_counter          = InCount,
+                                      durable             = IsDurable,
+                                      unconfirmed         = UC }) ->
+    IsPersistent1 = IsDurable andalso IsPersistent,
+    MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
+    {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
+    State2 = record_pending_ack(m(MsgStatus1), State1),
+    UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+    State3 = stats({0, 1}, {none, MsgStatus1},
+                   State2 #vqstate { next_seq_id      = SeqId    + 1,
+                                     out_counter      = OutCount + 1,
+                                     in_counter       = InCount  + 1,
+                                     unconfirmed      = UC1 }),
+    {SeqId, State3}.
+
+%% Fold step for batch publish-delivered: publish one pre-delivered
+%% message via the batching index write path, prepending its seq id
+%% (ack tag) to the accumulator.
+batch_publish_delivered1({Msg, MsgProps}, {ChPid, Flow, SeqIds, State}) ->
+    {SeqId, State1} = publish_delivered1(
+                        Msg, MsgProps, ChPid, Flow,
+                        fun maybe_prepare_write_to_disk/4, State),
+    {ChPid, Flow, [SeqId | SeqIds], State1}.
+
+%% Write the message body to the message store if it is not already
+%% there and either Force is set or the message is persistent.
+%% Messages that persist_to/1 routes to the queue index are not
+%% written here -- their body travels with the index entry instead.
+maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status {
+                                  msg_in_store = true }, State) ->
+    {MsgStatus, State};
+maybe_write_msg_to_disk(Force, MsgStatus = #msg_status {
+                                 msg = Msg, msg_id = MsgId,
+                                 is_persistent = IsPersistent },
+                        State = #vqstate{ msg_store_clients = MSCState,
+                                          disk_write_count  = Count})
+  when Force orelse IsPersistent ->
+    case persist_to(MsgStatus) of
+        msg_store   -> ok = msg_store_write(MSCState, IsPersistent, MsgId,
+                                            prepare_to_store(Msg)),
+                       {MsgStatus#msg_status{msg_in_store = true},
+                        State#vqstate{disk_write_count = Count + 1}};
+        queue_index -> {MsgStatus, State}
+    end;
+maybe_write_msg_to_disk(_Force, MsgStatus, State) ->
+    {MsgStatus, State}.
+
+%% Due to certain optimizations made inside
+%% rabbit_queue_index:pre_publish/7 we need to have two separate
+%% functions for index persistence. This one is only used when paging
+%% during memory pressure. We didn't want to modify
+%% maybe_write_index_to_disk/3 because that function is used in other
+%% places.
+%%
+%% Writes the index entry (batched via pre_publish/7) if it is not
+%% already on disk and either Force is set or the message is
+%% persistent; messages embedded in the index carry their prepared
+%% body and count as an extra disk write.
+maybe_batch_write_index_to_disk(_Force,
+                                MsgStatus = #msg_status {
+                                  index_on_disk = true }, State) ->
+    {MsgStatus, State};
+maybe_batch_write_index_to_disk(Force,
+                                MsgStatus = #msg_status {
+                                  msg           = Msg,
+                                  msg_id        = MsgId,
+                                  seq_id        = SeqId,
+                                  is_persistent = IsPersistent,
+                                  is_delivered  = IsDelivered,
+                                  msg_props     = MsgProps},
+                                State = #vqstate {
+                                           target_ram_count = TargetRamCount,
+                                           disk_write_count = DiskWriteCount,
+                                           index_state      = IndexState})
+  when Force orelse IsPersistent ->
+    {MsgOrId, DiskWriteCount1} =
+        case persist_to(MsgStatus) of
+            msg_store   -> {MsgId, DiskWriteCount};
+            queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
+        end,
+    IndexState1 = rabbit_queue_index:pre_publish(
+                    MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered,
+                    TargetRamCount, IndexState),
+    {MsgStatus#msg_status{index_on_disk = true},
+     State#vqstate{index_state      = IndexState1,
+                   disk_write_count = DiskWriteCount1}};
+maybe_batch_write_index_to_disk(_Force, MsgStatus, State) ->
+    {MsgStatus, State}.
+
+%% Non-batched counterpart of maybe_batch_write_index_to_disk/3:
+%% publishes the index entry directly (and marks it delivered if
+%% needed) when it is not already on disk and either Force is set or
+%% the message is persistent.
+maybe_write_index_to_disk(_Force, MsgStatus = #msg_status {
+                                    index_on_disk = true }, State) ->
+    {MsgStatus, State};
+maybe_write_index_to_disk(Force, MsgStatus = #msg_status {
+                                   msg           = Msg,
+                                   msg_id        = MsgId,
+                                   seq_id        = SeqId,
+                                   is_persistent = IsPersistent,
+                                   is_delivered  = IsDelivered,
+                                   msg_props     = MsgProps},
+                          State = #vqstate{target_ram_count = TargetRamCount,
+                                           disk_write_count = DiskWriteCount,
+                                           index_state      = IndexState})
+  when Force orelse IsPersistent ->
+    %% embedded messages carry their prepared body into the index and
+    %% count as an extra disk write
+    {MsgOrId, DiskWriteCount1} =
+        case persist_to(MsgStatus) of
+            msg_store   -> {MsgId, DiskWriteCount};
+            queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
+        end,
+    IndexState1 = rabbit_queue_index:publish(
+                    MsgOrId, SeqId, MsgProps, IsPersistent, TargetRamCount,
+                    IndexState),
+    IndexState2 = maybe_write_delivered(IsDelivered, SeqId, IndexState1),
+    {MsgStatus#msg_status{index_on_disk = true},
+     State#vqstate{index_state      = IndexState2,
+                   disk_write_count = DiskWriteCount1}};
+
+maybe_write_index_to_disk(_Force, MsgStatus, State) ->
+    {MsgStatus, State}.
+
+%% Write body then index entry, subject to the two force flags and the
+%% per-message persistence rules.
+maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus0, State0) ->
+    {MsgStatus, State} =
+        maybe_write_msg_to_disk(ForceMsg, MsgStatus0, State0),
+    maybe_write_index_to_disk(ForceIndex, MsgStatus, State).
+
+%% As maybe_write_to_disk/4, but routes the index write through the
+%% batching path (maybe_batch_write_index_to_disk/3).
+maybe_prepare_write_to_disk(ForceMsg, ForceIndex, MsgStatus0, State0) ->
+    {MsgStatus, State} =
+        maybe_write_msg_to_disk(ForceMsg, MsgStatus0, State0),
+    maybe_batch_write_index_to_disk(ForceIndex, MsgStatus, State).
+
+%% Decide whether a message belongs in the message store or embedded
+%% in the queue index, by comparing its (estimated) size against the
+%% qi_embed_msgs_below limit (IndexMaxSize).
+%%
+%% The >= is so that you can set the env to 0 and never persist
+%% to the index.
+%%
+%% We want this to be fast, so we avoid size(term_to_binary())
+%% here, or using the term size estimation from truncate.erl, both
+%% of which are too slow. So instead, if the message body size
+%% goes over the limit then we avoid any other checks.
+%%
+%% If it doesn't we need to decide if the properties will push
+%% it past the limit. If we have the encoded properties (usual
+%% case) we can just check their size. If we don't (message came
+%% via the direct client), we make a guess based on the number of
+%% headers.
+determine_persist_to(#basic_message{
+                        content = #content{properties     = Props,
+                                           properties_bin = PropsBin}},
+                     #message_properties{size = BodySize},
+                     IndexMaxSize) ->
+    case BodySize >= IndexMaxSize of
+        true ->
+            msg_store;
+        false ->
+            Estimate =
+                case is_binary(PropsBin) of
+                    true ->
+                        %% encoded properties available: exact size
+                        BodySize + byte_size(PropsBin);
+                    false ->
+                        %% direct client: guess from the header count
+                        #'P_basic'{headers = Hs} = Props,
+                        NumHeaders = case Hs of
+                                         undefined -> 0;
+                                         _         -> length(Hs)
+                                     end,
+                        NumHeaders * ?HEADER_GUESS_SIZE + BodySize
+                end,
+            case Estimate >= IndexMaxSize of
+                true  -> msg_store;
+                false -> queue_index
+            end
+    end.
+
+%% The persistence target recorded in the message status:
+%% msg_store | queue_index.
+persist_to(#msg_status{persist_to = To}) -> To.
+
+%% Strip recoverable decoded properties before the message is written
+%% out, so only the minimal content hits the disk.
+prepare_to_store(Msg = #basic_message{content = Content}) ->
+    Msg#basic_message{
+      content = rabbit_binary_parser:clear_decoded_content(Content)}.
+
+%%----------------------------------------------------------------------------
+%% Internal gubbins for acks
+%%----------------------------------------------------------------------------
+
+%% File the message under one of the three pending-ack trees, keyed by
+%% seq id: DPA when the body is not in RAM, QPA when the message is
+%% embedded in the queue index, RPA when the body is in RAM and
+%% destined for the message store.
+record_pending_ack(#msg_status { seq_id = SeqId } = MsgStatus,
+                   State = #vqstate { ram_pending_ack  = RPA,
+                                      disk_pending_ack = DPA,
+                                      qi_pending_ack   = QPA,
+                                      ack_in_counter   = AckInCount}) ->
+    Insert = fun (Tree) -> gb_trees:insert(SeqId, MsgStatus, Tree) end,
+    {RPA1, DPA1, QPA1} =
+        case {msg_in_ram(MsgStatus), persist_to(MsgStatus)} of
+            {false, _}           -> {RPA, Insert(DPA), QPA};
+            {_,     queue_index} -> {RPA, DPA, Insert(QPA)};
+            {_,     msg_store}   -> {Insert(RPA), DPA, QPA}
+        end,
+    State #vqstate { ram_pending_ack  = RPA1,
+                     disk_pending_ack = DPA1,
+                     qi_pending_ack   = QPA1,
+                     ack_in_counter   = AckInCount + 1}.
+
+%% Find the pending-ack #msg_status{} for SeqId, consulting the RAM
+%% tree, then the disk tree, then the queue-index tree.
+lookup_pending_ack(SeqId, #vqstate { ram_pending_ack  = RPA,
+                                     disk_pending_ack = DPA,
+                                     qi_pending_ack   = QPA}) ->
+    first_in_trees(SeqId, [RPA, DPA, QPA]).
+
+%% Value for Key from the first tree that holds it; the last tree is
+%% read with gb_trees:get/2, which errors out if the key is missing
+%% everywhere.
+first_in_trees(Key, [LastTree]) ->
+    gb_trees:get(Key, LastTree);
+first_in_trees(Key, [Tree | Trees]) ->
+    case gb_trees:lookup(Key, Tree) of
+        {value, V} -> V;
+        none       -> first_in_trees(Key, Trees)
+    end.
+
+%% Remove and return the pending-ack #msg_status{} for SeqId from
+%% whichever tree holds it.
+%% First parameter = UpdateStats
+remove_pending_ack(true, SeqId, State) ->
+    {MsgStatus, State1} = remove_pending_ack(false, SeqId, State),
+    %% one fewer unacked message; ready count untouched
+    {MsgStatus, stats({0, -1}, {MsgStatus, none}, State1)};
+remove_pending_ack(false, SeqId, State = #vqstate{ram_pending_ack  = RPA,
+                                                  disk_pending_ack = DPA,
+                                                  qi_pending_ack   = QPA}) ->
+    case gb_trees:lookup(SeqId, RPA) of
+        {value, V} -> RPA1 = gb_trees:delete(SeqId, RPA),
+                      {V, State #vqstate { ram_pending_ack = RPA1 }};
+        none       -> case gb_trees:lookup(SeqId, DPA) of
+                          {value, V} ->
+                              DPA1 = gb_trees:delete(SeqId, DPA),
+                              {V, State#vqstate{disk_pending_ack = DPA1}};
+                          none ->
+                              %% not in RPA or DPA, so it must be in
+                              %% QPA -- gb_trees:delete/2 crashes here
+                              %% otherwise
+                              QPA1 = gb_trees:delete(SeqId, QPA),
+                              {gb_trees:get(SeqId, QPA),
+                               State#vqstate{qi_pending_ack = QPA1}}
+                      end
+    end.
+
+%% Drop all pending acks. KeepPersistent = true: only transient
+%% message bodies are removed and the queue index is left untouched;
+%% false: index entries are acked and bodies removed from both stores.
+purge_pending_ack(KeepPersistent,
+                  State = #vqstate { index_state       = IndexState,
+                                     msg_store_clients = MSCState }) ->
+    {IndexOnDiskSeqIds, MsgIdsByStore, State1} = purge_pending_ack1(State),
+    case KeepPersistent of
+        true  -> remove_transient_msgs_by_id(MsgIdsByStore, MSCState),
+                 State1;
+        false -> IndexState1 =
+                     rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
+                 remove_msgs_by_id(MsgIdsByStore, MSCState),
+                 State1 #vqstate { index_state = IndexState1 }
+    end.
+
+%% As purge_pending_ack/2 with KeepPersistent = false, except the
+%% queue index is deleted and terminated wholesale instead of being
+%% sent individual acks.
+purge_pending_ack_delete_and_terminate(
+  State = #vqstate { index_state       = IndexState,
+                     msg_store_clients = MSCState }) ->
+    {_, MsgIdsByStore, State1} = purge_pending_ack1(State),
+    IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState),
+    remove_msgs_by_id(MsgIdsByStore, MSCState),
+    State1 #vqstate { index_state = IndexState1 }.
+
+%% Fold accumulate_ack/2 over all three pending-ack trees, returning
+%% {IndexOnDiskSeqIds, MsgIdsByStore, State} with the trees emptied in
+%% the returned state.
+purge_pending_ack1(State = #vqstate { ram_pending_ack   = RPA,
+                                      disk_pending_ack  = DPA,
+                                      qi_pending_ack    = QPA }) ->
+    F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end,
+    {IndexOnDiskSeqIds, MsgIdsByStore, _AllMsgIds} =
+        rabbit_misc:gb_trees_fold(
+          F, rabbit_misc:gb_trees_fold(
+               F,  rabbit_misc:gb_trees_fold(
+                     F, accumulate_ack_init(), RPA), DPA), QPA),
+    State1 = State #vqstate { ram_pending_ack  = gb_trees:empty(),
+                              disk_pending_ack = gb_trees:empty(),
+                              qi_pending_ack   = gb_trees:empty()},
+    {IndexOnDiskSeqIds, MsgIdsByStore, State1}.
+
+%% MsgIdsByStore is an orddict with two keys:
+%%
+%% true: holds a list of Persistent Message Ids.
+%% false: holds a list of Transient Message Ids.
+%%
+%% When we call orddict:to_list/1 we get two sets of msg ids, where
+%% IsPersistent is either true for persistent messages or false for
+%% transient ones. The msg_store_remove/3 function takes this boolean
+%% flag to determine from which store the messages should be removed.
+remove_msgs_by_id(MsgIdsByStore, MSCState) ->
+    [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
+     || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)].
+
+%% Remove only the transient (IsPersistent = false) messages from the
+%% message store; persistent copies are deliberately left in place.
+remove_transient_msgs_by_id(MsgIdsByStore, MSCState) ->
+    case orddict:find(false, MsgIdsByStore) of
+        error        -> ok;
+        {ok, MsgIds} -> ok = msg_store_remove(MSCState, false, MsgIds)
+    end.
+
+accumulate_ack_init() -> {[], orddict:new(), []}.
+
+%% Fold one #msg_status into the accumulator started by
+%% accumulate_ack_init/0: the SeqId is collected only if the index
+%% entry is on disk, the MsgId is bucketed by persistence only if the
+%% message body is in a store, and every MsgId lands in AllMsgIds.
+accumulate_ack(#msg_status { seq_id        = SeqId,
+                             msg_id        = MsgId,
+                             is_persistent = IsPersistent,
+                             msg_in_store  = MsgInStore,
+                             index_on_disk = IndexOnDisk },
+               {IndexOnDiskSeqIdsAcc, MsgIdsByStore, AllMsgIds}) ->
+    {cons_if(IndexOnDisk, SeqId, IndexOnDiskSeqIdsAcc),
+     case MsgInStore of
+         true  -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore);
+         false -> MsgIdsByStore
+     end,
+     [MsgId | AllMsgIds]}.
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing for confirms (aka publisher acks)
+%%----------------------------------------------------------------------------
+
+%% Mark MsgIdSet as confirmed: the ids are removed from the three
+%% "still waiting" sets (msgs_on_disk, msg_indices_on_disk and
+%% unconfirmed) and added to the confirmed set, from where they will
+%% later be reported back to the publisher.
+record_confirms(MsgIdSet, State = #vqstate { msgs_on_disk        = MOD,
+                                             msg_indices_on_disk = MIOD,
+                                             unconfirmed         = UC,
+                                             confirmed           = C }) ->
+    State #vqstate {
+      msgs_on_disk        = rabbit_misc:gb_sets_difference(MOD,  MsgIdSet),
+      msg_indices_on_disk = rabbit_misc:gb_sets_difference(MIOD, MsgIdSet),
+      unconfirmed         = rabbit_misc:gb_sets_difference(UC,   MsgIdSet),
+      confirmed           = gb_sets:union(C, MsgIdSet) }.
+
+%% Callback invoked by the message store once message bodies hit disk.
+%% 'ignored' means the write was not needed (e.g. message already
+%% removed), so the set can be confirmed outright. 'written' means the
+%% bodies are now on disk: record them in msgs_on_disk and confirm only
+%% those ids whose index entry is also already on disk (MIOD).
+%% The Callback defers the state update into the queue process.
+msgs_written_to_disk(Callback, MsgIdSet, ignored) ->
+    Callback(?MODULE,
+             fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end);
+msgs_written_to_disk(Callback, MsgIdSet, written) ->
+    Callback(?MODULE,
+             fun (?MODULE, State = #vqstate { msgs_on_disk        = MOD,
+                                              msg_indices_on_disk = MIOD,
+                                              unconfirmed         = UC }) ->
+                     Confirmed = gb_sets:intersection(UC, MsgIdSet),
+                     record_confirms(gb_sets:intersection(MsgIdSet, MIOD),
+                                     State #vqstate {
+                                       msgs_on_disk =
+                                           gb_sets:union(MOD, Confirmed) })
+             end).
+
+%% Mirror image of the 'written' clause of msgs_written_to_disk/3:
+%% index entries for MsgIdSet are now on disk, so record them in
+%% msg_indices_on_disk and confirm the ids whose message bodies are
+%% already on disk (MOD).
+msg_indices_written_to_disk(Callback, MsgIdSet) ->
+    Callback(?MODULE,
+             fun (?MODULE, State = #vqstate { msgs_on_disk        = MOD,
+                                              msg_indices_on_disk = MIOD,
+                                              unconfirmed         = UC }) ->
+                     Confirmed = gb_sets:intersection(UC, MsgIdSet),
+                     record_confirms(gb_sets:intersection(MsgIdSet, MOD),
+                                     State #vqstate {
+                                       msg_indices_on_disk =
+                                           gb_sets:union(MIOD, Confirmed) })
+             end).
+
+%% Both the message bodies and their index entries are on disk in one
+%% go, so the whole set can be confirmed immediately.
+msgs_and_indices_written_to_disk(Callback, MsgIdSet) ->
+    Callback(?MODULE,
+             fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end).
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing for requeue
+%%----------------------------------------------------------------------------
+
+%% Requeue a message as an alpha (body held in RAM). If the body is not
+%% in memory it is read back first. The stats/3 delta {1, -1} moves one
+%% message from the unacked count to the ready count.
+publish_alpha(#msg_status { msg = undefined } = MsgStatus, State) ->
+    {Msg, State1} = read_msg(MsgStatus, State),
+    MsgStatus1 = MsgStatus#msg_status { msg = Msg },
+    {MsgStatus1, stats({1, -1}, {MsgStatus, MsgStatus1}, State1)};
+publish_alpha(MsgStatus, State) ->
+    {MsgStatus, stats({1, -1}, {MsgStatus, MsgStatus}, State)}.
+
+%% Requeue a message as a beta: force it to disk (message body, not
+%% necessarily the index — hence maybe_write_to_disk(true, false, ...))
+%% and trim the in-RAM copy out of the status record.
+publish_beta(MsgStatus, State) ->
+    {MsgStatus1, State1} = maybe_write_to_disk(true, false, MsgStatus, State),
+    MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+    {MsgStatus2, stats({1, -1}, {MsgStatus, MsgStatus2}, State1)}.
+
+%% Rebuild queue, inserting sequence ids to maintain ordering
+queue_merge(SeqIds, Q, MsgIds, Limit, PubFun, State) ->
+    queue_merge(SeqIds, Q, ?QUEUE:new(), MsgIds,
+                Limit, PubFun, State).
+
+%% Merge the (ascending) list of requeued SeqIds into the existing
+%% queue Q, keeping seq_id order. Front accumulates the merged prefix.
+%% Merging stops once SeqId reaches Limit (or the SeqIds run out), at
+%% which point the remaining SeqIds are handed back to the caller.
+queue_merge([SeqId | Rest] = SeqIds, Q, Front, MsgIds,
+            Limit, PubFun, State)
+  when Limit == undefined orelse SeqId < Limit ->
+    case ?QUEUE:out(Q) of
+        {{value, #msg_status { seq_id = SeqIdQ } = MsgStatus}, Q1}
+          when SeqIdQ < SeqId ->
+            %% enqueue from the remaining queue
+            queue_merge(SeqIds, Q1, ?QUEUE:in(MsgStatus, Front), MsgIds,
+                        Limit, PubFun, State);
+        {_, _Q1} ->
+            %% enqueue from the remaining list of sequence ids
+            {MsgStatus, State1} = msg_from_pending_ack(SeqId, State),
+            {#msg_status { msg_id = MsgId } = MsgStatus1, State2} =
+                PubFun(MsgStatus, State1),
+            queue_merge(Rest, Q, ?QUEUE:in(MsgStatus1, Front), [MsgId | MsgIds],
+                        Limit, PubFun, State2)
+    end;
+%% Limit reached or SeqIds exhausted: whatever is left of Q goes after
+%% the merged Front; unconsumed SeqIds are returned for delta_merge.
+queue_merge(SeqIds, Q, Front, MsgIds,
+            _Limit, _PubFun, State) ->
+    {SeqIds, ?QUEUE:join(Front, Q), MsgIds, State}.
+
+%% Requeue the remaining SeqIds straight into delta: each message is
+%% pulled out of the pending-ack trees, forced fully to disk (body and
+%% index), and the delta is expanded to cover its seq id. The stats/3
+%% {MsgStatus, none} pair reflects that the message leaves RAM entirely.
+delta_merge([], Delta, MsgIds, State) ->
+    {Delta, MsgIds, State};
+delta_merge(SeqIds, Delta, MsgIds, State) ->
+    lists:foldl(fun (SeqId, {Delta0, MsgIds0, State0}) ->
+                        {#msg_status { msg_id = MsgId } = MsgStatus, State1} =
+                            msg_from_pending_ack(SeqId, State0),
+                        {_MsgStatus, State2} =
+                            maybe_write_to_disk(true, true, MsgStatus, State1),
+                        {expand_delta(SeqId, Delta0), [MsgId | MsgIds0],
+                         stats({1, -1}, {MsgStatus, none}, State2)}
+                end, {Delta, MsgIds, State}, SeqIds).
+
+%% Mostly opposite of record_pending_ack/2: take the message back out
+%% of whichever pending-ack tree holds it, clearing needs_confirming
+%% since a requeued message must not trigger a publisher confirm again.
+msg_from_pending_ack(SeqId, State) ->
+    {#msg_status { msg_props = MsgProps } = MsgStatus, State1} =
+        remove_pending_ack(false, SeqId, State),
+    {MsgStatus #msg_status {
+       msg_props = MsgProps #message_properties { needs_confirming = false } },
+     State1}.
+
+%% Lowest seq id currently in the beta queue Q, or 'undefined' if Q is
+%% empty; used as the merge Limit for queue_merge/6.
+beta_limit(Q) ->
+    case ?QUEUE:peek(Q) of
+        {value, #msg_status { seq_id = SeqId }} -> SeqId;
+        empty                                   -> undefined
+    end.
+
+%% First seq id covered by delta, or 'undefined' for an empty delta.
+delta_limit(?BLANK_DELTA_PATTERN(_X))             -> undefined;
+delta_limit(#delta { start_seq_id = StartSeqId }) -> StartSeqId.
+
+%%----------------------------------------------------------------------------
+%% Iterator
+%%----------------------------------------------------------------------------
+
+%% Iterator constructors used by ifold/4. The {ack, ...} tag tells
+%% next/2 to walk a gb_trees iterator over a pending-ack tree.
+ram_ack_iterator(State) ->
+    {ack, gb_trees:iterator(State#vqstate.ram_pending_ack)}.
+
+disk_ack_iterator(State) ->
+    {ack, gb_trees:iterator(State#vqstate.disk_pending_ack)}.
+
+qi_ack_iterator(State) ->
+    {ack, gb_trees:iterator(State#vqstate.qi_pending_ack)}.
+
+%% Iterator over the ready (unacked messages excluded) contents.
+msg_iterator(State) -> istate(start, State).
+
+%% State machine driving msg_iterator/1: visit the internal queues in
+%% order q4 -> q3 -> delta -> q2 -> q1, i.e. oldest messages first.
+istate(start, State) -> {q4,    State#vqstate.q4,    State};
+istate(q4,    State) -> {q3,    State#vqstate.q3,    State};
+istate(q3,    State) -> {delta, State#vqstate.delta, State};
+istate(delta, State) -> {q2,    State#vqstate.q2,    State};
+istate(q2,    State) -> {q1,    State#vqstate.q1,    State};
+istate(q1,   _State) -> done.
+
+%% Advance an iterator one step. Returns {empty, IndexState} when
+%% exhausted, otherwise {value, MsgStatus, Unacked, NextIter,
+%% IndexState} where Unacked is true only for the {ack, _} iterators.
+next({ack, It}, IndexState) ->
+    case gb_trees:next(It) of
+        none                     -> {empty, IndexState};
+        {_SeqId, MsgStatus, It1} -> Next = {ack, It1},
+                                    {value, MsgStatus, true, Next, IndexState}
+    end;
+next(done, IndexState) -> {empty, IndexState};
+%% Empty delta (start == end): move on to the next internal queue.
+next({delta, #delta{start_seq_id = SeqId,
+                    end_seq_id   = SeqId}, State}, IndexState) ->
+    next(istate(delta, State), IndexState);
+%% Non-empty delta: read one segment's worth of index entries and
+%% continue with the materialised list.
+next({delta, #delta{start_seq_id = SeqId,
+                    end_seq_id   = SeqIdEnd} = Delta, State}, IndexState) ->
+    SeqIdB = rabbit_queue_index:next_segment_boundary(SeqId),
+    SeqId1 = lists:min([SeqIdB, SeqIdEnd]),
+    {List, IndexState1} = rabbit_queue_index:read(SeqId, SeqId1, IndexState),
+    next({delta, Delta#delta{start_seq_id = SeqId1}, List, State}, IndexState1);
+next({delta, Delta, [], State}, IndexState) ->
+    next({delta, Delta, State}, IndexState);
+%% Skip entries that are pending ack — they are not "ready" messages.
+next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) ->
+    case is_msg_in_pending_acks(SeqId, State) of
+        false -> Next = {delta, Delta, Rest, State},
+                 {value, beta_msg_status(M), false, Next, IndexState};
+        true  -> next({delta, Delta, Rest, State}, IndexState)
+    end;
+%% Plain queue iterators (q1..q4): pop and fall through to the next
+%% queue via istate/2 when drained.
+next({Key, Q, State}, IndexState) ->
+    case ?QUEUE:out(Q) of
+        {empty, _Q}              -> next(istate(Key, State), IndexState);
+        {{value, MsgStatus}, QN} -> Next = {Key, QN, State},
+                                    {value, MsgStatus, false, Next, IndexState}
+    end.
+
+%% Step iterator It once; if it yields a value, push the
+%% {MsgStatus, Unacked, NextIter} triple onto the live-iterator list,
+%% otherwise drop the iterator. Threads IndexState through.
+inext(It, {Its, IndexState}) ->
+    case next(It, IndexState) of
+        {empty, IndexState1} ->
+            {Its, IndexState1};
+        {value, MsgStatus1, Unacked, It1, IndexState1} ->
+            {[{MsgStatus1, Unacked, It1} | Its], IndexState1}
+    end.
+
+%% K-way merge fold over several iterators: always process the
+%% lowest-seq_id head next, read its body, and feed it to Fun, which
+%% returns {cont, Acc} to continue or {stop, Acc} to abort.
+ifold(_Fun, Acc, [], State) ->
+    {Acc, State};
+ifold(Fun, Acc, Its, State) ->
+    %% Sorting the (small) list of live iterators each round picks the
+    %% message with the smallest seq id across all of them.
+    [{MsgStatus, Unacked, It} | Rest] =
+        lists:sort(fun ({#msg_status{seq_id = SeqId1}, _, _},
+                        {#msg_status{seq_id = SeqId2}, _, _}) ->
+                           SeqId1 =< SeqId2
+                   end, Its),
+    {Msg, State1} = read_msg(MsgStatus, State),
+    case Fun(Msg, MsgStatus#msg_status.msg_props, Unacked, Acc) of
+        {stop, Acc1} ->
+            %% NOTE(review): returns the pre-read_msg State (not
+            %% State1), discarding that read's state update —
+            %% presumably intentional upstream; verify.
+            {Acc1, State};
+        {cont, Acc1} ->
+            {Its1, IndexState1} = inext(It, {Rest, State1#vqstate.index_state}),
+            ifold(Fun, Acc1, Its1, State1#vqstate{index_state = IndexState1})
+    end.
+
+%%----------------------------------------------------------------------------
+%% Phase changes
+%%----------------------------------------------------------------------------
+
+%% Push message content out of RAM until the queue fits within its
+%% target_ram_count. No-op when the target is 'infinity'.
+reduce_memory_use(State = #vqstate { target_ram_count = infinity }) ->
+    State;
+%% Default-mode queues: first convert alphas to betas and/or page ram
+%% pending acks to disk, then (if a large enough batch accumulates)
+%% push betas/gammas on into delta.
+reduce_memory_use(State = #vqstate {
+                    mode             = default,
+                    ram_pending_ack  = RPA,
+                    ram_msg_count    = RamMsgCount,
+                    target_ram_count = TargetRamCount,
+                    io_batch_size    = IoBatchSize,
+                    rates            = #rates { in      = AvgIngress,
+                                                out     = AvgEgress,
+                                                ack_in  = AvgAckIngress,
+                                                ack_out = AvgAckEgress } }) ->
+
+    State1 = #vqstate { q2 = Q2, q3 = Q3 } =
+        case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
+            0  -> State;
+            %% Reduce memory of pending acks and alphas. The order is
+            %% determined based on which is growing faster. Whichever
+            %% comes second may very well get a quota of 0 if the
+            %% first manages to push out the max number of messages.
+            S1 -> Funs = case ((AvgAckIngress - AvgAckEgress) >
+                                   (AvgIngress - AvgEgress)) of
+                             true  -> [fun limit_ram_acks/2,
+                                       fun push_alphas_to_betas/2];
+                             false -> [fun push_alphas_to_betas/2,
+                                       fun limit_ram_acks/2]
+                         end,
+                  {_, State2} = lists:foldl(fun (ReduceFun, {QuotaN, StateN}) ->
+                                                    ReduceFun(QuotaN, StateN)
+                                            end, {S1, State}, Funs),
+                  State2
+        end,
+
+    State3 =
+        case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3),
+                        permitted_beta_count(State1)) of
+            S2 when S2 >= IoBatchSize ->
+                %% There is an implicit, but subtle, upper bound here. We
+                %% may shuffle a lot of messages from Q2/3 into delta, but
+                %% the number of these that require any disk operation,
+                %% namely index writing, i.e. messages that are genuine
+                %% betas and not gammas, is bounded by the credit_flow
+                %% limiting of the alpha->beta conversion above.
+                push_betas_to_deltas(S2, State1);
+            _  ->
+                State1
+        end,
+    %% See rabbitmq-server-290 for the reasons behind this GC call.
+    garbage_collect(),
+    State3;
+%% When using lazy queues, there are no alphas, so we don't need to
+%% call push_alphas_to_betas/2.
+reduce_memory_use(State = #vqstate {
+                             mode = lazy,
+                             ram_pending_ack  = RPA,
+                             ram_msg_count    = RamMsgCount,
+                             target_ram_count = TargetRamCount }) ->
+    State1 = #vqstate { q3 = Q3 } =
+        case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
+            0  -> State;
+            S1 -> {_, State2} = limit_ram_acks(S1, State),
+                  State2
+        end,
+
+    %% Note: unlike the default mode there is no IoBatchSize threshold
+    %% here — any excess betas are pushed to delta immediately.
+    State3 =
+        case chunk_size(?QUEUE:len(Q3),
+                        permitted_beta_count(State1)) of
+            0  ->
+                State1;
+            S2 ->
+                push_betas_to_deltas(S2, State1)
+        end,
+    garbage_collect(),
+    State3.
+
+%% Page up to Quota pending acks from the ram tree to the disk tree,
+%% writing message bodies out (maybe_prepare_write_to_disk) and
+%% trimming the in-RAM copies. Works from the largest seq id down.
+%% ui/1 flushes the queue-index batch cache when we stop.
+limit_ram_acks(0, State) ->
+    {0, ui(State)};
+limit_ram_acks(Quota, State = #vqstate { ram_pending_ack  = RPA,
+                                         disk_pending_ack = DPA }) ->
+    case gb_trees:is_empty(RPA) of
+        true ->
+            {Quota, ui(State)};
+        false ->
+            {SeqId, MsgStatus, RPA1} = gb_trees:take_largest(RPA),
+            {MsgStatus1, State1} =
+                maybe_prepare_write_to_disk(true, false, MsgStatus, State),
+            MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+            DPA1 = gb_trees:insert(SeqId, MsgStatus2, DPA),
+            limit_ram_acks(Quota - 1,
+                           stats({0, 0}, {MsgStatus, MsgStatus2},
+                                 State1 #vqstate { ram_pending_ack  = RPA1,
+                                                   disk_pending_ack = DPA1 }))
+    end.
+
+%% How many betas the queue is allowed to keep. An empty queue has no
+%% cap; lazy queues are capped by target_ram_count directly; with a
+%% zero RAM target we keep at most one index segment's worth; otherwise
+%% the quadratic formula shrinks the permitted count as the
+%% beta+delta population grows relative to the RAM target.
+permitted_beta_count(#vqstate { len = 0 }) ->
+    infinity;
+permitted_beta_count(#vqstate { mode             = lazy,
+                                target_ram_count = TargetRamCount}) ->
+    TargetRamCount;
+permitted_beta_count(#vqstate { target_ram_count = 0, q3 = Q3 }) ->
+    lists:min([?QUEUE:len(Q3), rabbit_queue_index:next_segment_boundary(0)]);
+permitted_beta_count(#vqstate { q1               = Q1,
+                                q4               = Q4,
+                                target_ram_count = TargetRamCount,
+                                len              = Len }) ->
+    BetaDelta = Len - ?QUEUE:len(Q1) - ?QUEUE:len(Q4),
+    lists:max([rabbit_queue_index:next_segment_boundary(0),
+               BetaDelta - ((BetaDelta * BetaDelta) div
+                                (BetaDelta + TargetRamCount))]).
+
+%% Excess of Current over Permitted, i.e. how many entries must be
+%% moved out; 0 when within the limit or the limit is 'infinity'.
+chunk_size(Current, Permitted)
+  when Permitted =:= infinity orelse Permitted >= Current ->
+    0;
+chunk_size(Current, Permitted) ->
+    Current - Permitted.
+
+%% Pop the next message from q3, re-establishing the q1..q4/delta
+%% invariants afterwards. Returns {empty, State} or
+%% {loaded, {MsgStatus, State1}}.
+fetch_from_q3(State = #vqstate { mode  = default,
+                                 q1    = Q1,
+                                 q2    = Q2,
+                                 delta = #delta { count = DeltaCount },
+                                 q3    = Q3,
+                                 q4    = Q4 }) ->
+    case ?QUEUE:out(Q3) of
+        {empty, _Q3} ->
+            {empty, State};
+        {{value, MsgStatus}, Q3a} ->
+            State1 = State #vqstate { q3 = Q3a },
+            State2 = case {?QUEUE:is_empty(Q3a), 0 == DeltaCount} of
+                         {true, true} ->
+                             %% q3 is now empty, it wasn't before;
+                             %% delta is still empty. So q2 must be
+                             %% empty, and we know q4 is empty
+                             %% otherwise we wouldn't be loading from
+                             %% q3. As such, we can just set q4 to Q1.
+                             true = ?QUEUE:is_empty(Q2), %% ASSERTION
+                             true = ?QUEUE:is_empty(Q4), %% ASSERTION
+                             State1 #vqstate { q1 = ?QUEUE:new(), q4 = Q1 };
+                         {true, false} ->
+                             %% q3 drained but delta still holds
+                             %% messages: pull the next segment in.
+                             maybe_deltas_to_betas(State1);
+                         {false, _} ->
+                             %% q3 still isn't empty, we've not
+                             %% touched delta, so the invariants
+                             %% between q1, q2, delta and q3 are
+                             %% maintained
+                             State1
+                     end,
+            {loaded, {MsgStatus, State2}}
+    end;
+%% lazy queues: everything lives in q3/delta, so an empty q3 with a
+%% non-empty delta just triggers a page-in and a retry.
+fetch_from_q3(State = #vqstate { mode  = lazy,
+                                 delta = #delta { count = DeltaCount },
+                                 q3    = Q3 }) ->
+    case ?QUEUE:out(Q3) of
+        {empty, _Q3} when DeltaCount =:= 0 ->
+            {empty, State};
+        {empty, _Q3} ->
+            fetch_from_q3(maybe_deltas_to_betas(State));
+        {{value, MsgStatus}, Q3a} ->
+            State1 = State #vqstate { q3 = Q3a },
+            {loaded, {MsgStatus, State1}}
+    end.
+
+%% Page one index segment's worth of messages from delta into q3.
+maybe_deltas_to_betas(State) ->
+    AfterFun = process_delivers_and_acks_fun(deliver_and_ack),
+    maybe_deltas_to_betas(AfterFun, State).
+
+%% No-op when delta is blank.
+maybe_deltas_to_betas(_DelsAndAcksFun,
+                      State = #vqstate {delta = ?BLANK_DELTA_PATTERN(X) }) ->
+    State;
+maybe_deltas_to_betas(DelsAndAcksFun,
+                      State = #vqstate {
+                        q2                   = Q2,
+                        delta                = Delta,
+                        q3                   = Q3,
+                        index_state          = IndexState,
+                        ram_msg_count        = RamMsgCount,
+                        ram_bytes            = RamBytes,
+                        disk_read_count      = DiskReadCount,
+                        transient_threshold  = TransientThreshold }) ->
+    #delta { start_seq_id = DeltaSeqId,
+             count        = DeltaCount,
+             end_seq_id   = DeltaSeqIdEnd } = Delta,
+    %% Read up to the next segment boundary (but never past the delta's
+    %% own end) from the queue index.
+    DeltaSeqId1 =
+        lists:min([rabbit_queue_index:next_segment_boundary(DeltaSeqId),
+                   DeltaSeqIdEnd]),
+    {List, IndexState1} = rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1,
+                                                  IndexState),
+    {Q3a, RamCountsInc, RamBytesInc, State1} =
+        betas_from_index_entries(List, TransientThreshold,
+                                 DelsAndAcksFun,
+                                 State #vqstate { index_state = IndexState1 }),
+    State2 = State1 #vqstate { ram_msg_count     = RamMsgCount   + RamCountsInc,
+                               ram_bytes         = RamBytes      + RamBytesInc,
+                               disk_read_count   = DiskReadCount + RamCountsInc },
+    case ?QUEUE:len(Q3a) of
+        0 ->
+            %% we ignored every message in the segment due to it being
+            %% transient and below the threshold
+            maybe_deltas_to_betas(
+              DelsAndAcksFun,
+              State2 #vqstate {
+                delta = d(Delta #delta { start_seq_id = DeltaSeqId1 })});
+        Q3aLen ->
+            Q3b = ?QUEUE:join(Q3, Q3a),
+            case DeltaCount - Q3aLen of
+                0 ->
+                    %% delta is now empty, but it wasn't before, so
+                    %% can now join q2 onto q3
+                    State2 #vqstate { q2    = ?QUEUE:new(),
+                                      delta = ?BLANK_DELTA,
+                                      q3    = ?QUEUE:join(Q3b, Q2) };
+                N when N > 0 ->
+                    %% delta still has N messages beyond the segment we
+                    %% just loaded; shrink it accordingly.
+                    Delta1 = d(#delta { start_seq_id = DeltaSeqId1,
+                                        count        = N,
+                                        end_seq_id   = DeltaSeqIdEnd }),
+                    State2 #vqstate { delta = Delta1,
+                                      q3    = Q3b }
+            end
+    end.
+
+%% Convert up to Quota alphas into betas: drain q1 from its front
+%% (into q3 when delta is empty, else q2), then q4 from its rear
+%% (into the front of q3), preserving overall seq-id ordering.
+push_alphas_to_betas(Quota, State) ->
+    {Quota1, State1} =
+        push_alphas_to_betas(
+          fun ?QUEUE:out/1,
+          fun (MsgStatus, Q1a,
+               State0 = #vqstate { q3 = Q3, delta = #delta { count = 0 } }) ->
+                  State0 #vqstate { q1 = Q1a, q3 = ?QUEUE:in(MsgStatus, Q3) };
+              (MsgStatus, Q1a, State0 = #vqstate { q2 = Q2 }) ->
+                  State0 #vqstate { q1 = Q1a, q2 = ?QUEUE:in(MsgStatus, Q2) }
+          end, Quota, State #vqstate.q1, State),
+    {Quota2, State2} =
+        push_alphas_to_betas(
+          fun ?QUEUE:out_r/1,
+          fun (MsgStatus, Q4a, State0 = #vqstate { q3 = Q3 }) ->
+                  State0 #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3), q4 = Q4a }
+          end, Quota1, State1 #vqstate.q4, State1),
+    {Quota2, State2}.
+
+%% Worker: Generator pops from Q, Consumer stores the trimmed beta.
+%% Stops when the quota is spent, the RAM target is already met, the
+%% source queue is drained, or credit_flow blocks us; ui/1 flushes the
+%% queue-index batch cache on every exit path.
+push_alphas_to_betas(_Generator, _Consumer, Quota, _Q,
+                     State = #vqstate { ram_msg_count    = RamMsgCount,
+                                        target_ram_count = TargetRamCount })
+  when Quota =:= 0 orelse
+       TargetRamCount =:= infinity orelse
+       TargetRamCount >= RamMsgCount ->
+    {Quota, ui(State)};
+push_alphas_to_betas(Generator, Consumer, Quota, Q, State) ->
+    %% We consume credits from the message_store whenever we need to
+    %% persist a message to disk. See:
+    %% rabbit_variable_queue:msg_store_write/4. So perhaps the
+    %% msg_store is trying to throttle down our queue.
+    case credit_flow:blocked() of
+        true  -> {Quota, ui(State)};
+        false -> case Generator(Q) of
+                     {empty, _Q} ->
+                         {Quota, ui(State)};
+                     {{value, MsgStatus}, Qa} ->
+                         {MsgStatus1, State1} =
+                             maybe_prepare_write_to_disk(true, false, MsgStatus,
+                                                         State),
+                         MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+                         State2 = stats(
+                                    ready0, {MsgStatus, MsgStatus2}, State1),
+                         State3 = Consumer(MsgStatus2, Qa, State2),
+                         push_alphas_to_betas(Generator, Consumer, Quota - 1,
+                                              Qa, State3)
+                 end
+    end.
+
+%% Move betas into delta. Default mode drains q3 from its rear (down
+%% to the next segment boundary, keeping a segment's worth in RAM) and
+%% then q2 from its front; the resulting delta replaces what was moved.
+push_betas_to_deltas(Quota, State = #vqstate { mode  = default,
+                                               q2    = Q2,
+                                               delta = Delta,
+                                               q3    = Q3}) ->
+    PushState = {Quota, Delta, State},
+    {Q3a, PushState1} = push_betas_to_deltas(
+                          fun ?QUEUE:out_r/1,
+                          fun rabbit_queue_index:next_segment_boundary/1,
+                          Q3, PushState),
+    {Q2a, PushState2} = push_betas_to_deltas(
+                          fun ?QUEUE:out/1,
+                          fun (Q2MinSeqId) -> Q2MinSeqId end,
+                          Q2, PushState1),
+    {_, Delta1, State1} = PushState2,
+    State1 #vqstate { q2    = Q2a,
+                      delta = Delta1,
+                      q3    = Q3a };
+%% In the case of lazy queues we want to page as many messages as
+%% possible from q3 (the identity LimitFun imposes no segment-boundary
+%% floor).
+push_betas_to_deltas(Quota, State = #vqstate { mode  = lazy,
+                                               delta = Delta,
+                                               q3    = Q3}) ->
+    PushState = {Quota, Delta, State},
+    {Q3a, PushState1} = push_betas_to_deltas(
+                          fun ?QUEUE:out_r/1,
+                          fun (Q2MinSeqId) -> Q2MinSeqId end,
+                          Q3, PushState),
+    {_, Delta1, State1} = PushState1,
+    State1 #vqstate { delta = Delta1,
+                      q3    = Q3a }.
+
+
+%% Worker for push_betas_to_deltas/2: computes the seq-id Limit from
+%% the queue's minimum and only descends into push_betas_to_deltas1/4
+%% if the queue actually contains entries at or beyond that limit.
+push_betas_to_deltas(Generator, LimitFun, Q, PushState) ->
+    case ?QUEUE:is_empty(Q) of
+        true ->
+            {Q, PushState};
+        false ->
+            {value, #msg_status { seq_id = MinSeqId }} = ?QUEUE:peek(Q),
+            {value, #msg_status { seq_id = MaxSeqId }} = ?QUEUE:peek_r(Q),
+            Limit = LimitFun(MinSeqId),
+            case MaxSeqId < Limit of
+                true  -> {Q, PushState};
+                false -> push_betas_to_deltas1(Generator, Limit, Q, PushState)
+            end
+    end.
+
+%% Pop entries with Generator while quota lasts and seq ids stay at or
+%% beyond Limit; each one gets its index entry written (batched) and
+%% is folded into the delta. ui/1 flushes the index batch cache on
+%% every exit path.
+push_betas_to_deltas1(_Generator, _Limit, Q, {0, Delta, State}) ->
+    {Q, {0, Delta, ui(State)}};
+push_betas_to_deltas1(Generator, Limit, Q, {Quota, Delta, State}) ->
+    case Generator(Q) of
+        {empty, _Q} ->
+            {Q, {Quota, Delta, ui(State)}};
+        {{value, #msg_status { seq_id = SeqId }}, _Qa}
+          when SeqId < Limit ->
+            %% Reached the protected region below Limit: stop here.
+            {Q, {Quota, Delta, ui(State)}};
+        {{value, MsgStatus = #msg_status { seq_id = SeqId }}, Qa} ->
+            {#msg_status { index_on_disk = true }, State1} =
+                maybe_batch_write_index_to_disk(true, MsgStatus, State),
+            State2 = stats(ready0, {MsgStatus, none}, State1),
+            Delta1 = expand_delta(SeqId, Delta),
+            push_betas_to_deltas1(Generator, Limit, Qa,
+                                  {Quota - 1, Delta1, State2})
+    end.
+
+%% Flushes queue index batch caches and updates queue index state.
+ui(#vqstate{index_state      = IndexState,
+            target_ram_count = TargetRamCount} = State) ->
+    IndexState1 = rabbit_queue_index:flush_pre_publish_cache(
+                    TargetRamCount, IndexState),
+    State#vqstate{index_state = IndexState1}.
+
+%% Delay hook: if any pending-ack message carries the ?TIMEOUT_TEST_MSG
+%% payload, block forever — presumably test-only instrumentation to
+%% simulate a stuck queue; verify against the test suites that send
+%% ?TIMEOUT_TEST_MSG.
+maybe_delay(QPA) ->
+  case is_timeout_test(gb_trees:values(QPA)) of
+    true -> receive
+              %% The queue received an EXIT message, it's probably the
+              %% node being stopped with "rabbitmqctl stop". Thus, abort
+              %% the wait and requeue the EXIT message.
+              {'EXIT', _, shutdown} = ExitMsg -> self() ! ExitMsg,
+                                                 void
+            after infinity -> void
+            end;
+    _ -> void
+  end.
+
+%% True if any listed #msg_status holds a message whose payload
+%% fragments contain the ?TIMEOUT_TEST_MSG marker; statuses without a
+%% matching #basic_message shape fall through to the catch-all clause.
+is_timeout_test([]) -> false;
+is_timeout_test([#msg_status{
+                    msg = #basic_message{
+                             content = #content{
+                                          payload_fragments_rev = PFR}}}|Rem]) ->
+  case lists:member(?TIMEOUT_TEST_MSG, PFR) of
+    T = true -> T;
+    _        -> is_timeout_test(Rem)
+  end;
+is_timeout_test([_|Rem]) -> is_timeout_test(Rem).
+
+%%----------------------------------------------------------------------------
+%% Upgrading
+%%----------------------------------------------------------------------------
+
+%% Storage upgrade step: rewrite every stored basic_message so its
+%% single routing key becomes a one-element list of routing keys;
+%% anything of a different shape is reported as corrupt.
+multiple_routing_keys() ->
+    transform_storage(
+      fun ({basic_message, ExchangeName, Routing_Key, Content,
+            MsgId, Persistent}) ->
+              {ok, {basic_message, ExchangeName, [Routing_Key], Content,
+                    MsgId, Persistent}};
+          (_) -> {error, corrupt_message}
+      end),
+    ok.
+
+
+%% Assumes message store is not running
+%% Apply TransformFun to both the persistent and the transient store.
+transform_storage(TransformFun) ->
+    transform_store(?PERSISTENT_MSG_STORE, TransformFun),
+    transform_store(?TRANSIENT_MSG_STORE, TransformFun).
+
+%% Force-recover one store's directory, then rewrite every message in
+%% it via TransformFun.
+transform_store(Store, TransformFun) ->
+    rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store),
+    rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun).
diff --git a/rabbitmq-server/test/cluster_rename_SUITE.erl b/rabbitmq-server/test/cluster_rename_SUITE.erl
new file mode 100644 (file)
index 0000000..8ce29a6
--- /dev/null
@@ -0,0 +1,304 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+%% Common Test suite for `rabbitmqctl rename_cluster_node`: rolling and
+%% big-bang renames of whole clusters, partial renames, and the
+%% command's failure modes.
+-module(cluster_rename_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%% export_all keeps the testcase functions visible to Common Test.
+-compile(export_all).
+
+all() ->
+    [
+      {group, cluster_size_2},
+      {group, cluster_size_3}
+    ].
+
+groups() ->
+    [
+      {cluster_size_2, [], [
+          % XXX post_change_nodename,
+          abortive_rename,
+          rename_fail,
+          rename_twice_fail
+        ]},
+      {cluster_size_3, [], [
+          rename_cluster_one_by_one,
+          rename_cluster_big_bang,
+          partial_one_by_one,
+          partial_big_bang
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+    rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 2} %% Replaced with a list of node names later.
+      ]);
+init_per_group(cluster_size_3, Config) ->
+    rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 3} %% Replaced with a list of node names later.
+      ]).
+
+end_per_group(_, Config) ->
+    Config.
+
+%% Derive one node name per cluster member from the testcase name
+%% (e.g. "rename_fail-1"), then start a pre-clustered broker set.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = ?config(rmq_nodes_count, Config),
+    Nodenames = [
+      list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+      || I <- lists:seq(1, ClusterSize)
+    ],
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, Nodenames},
+        {rmq_nodes_clustered, true}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+%% Testcases that renamed nodes return {save_config, Config}; prefer
+%% that updated config (with the new node names) when tearing down.
+end_per_testcase(Testcase, Config) ->
+    Config1 = case rabbit_ct_helpers:get_config(Config, save_config) of
+        undefined -> Config;
+        C         -> C
+    end,
+    Config2 = rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config2, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Rolling rename of a cluster, each node should do a secondary rename.
+rename_cluster_one_by_one(Config) ->
+    [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(
+      Config, nodename),
+    publish_all(Config,
+      [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+    Config1 = stop_rename_start(Config,  Node1, [Node1, jessica]),
+    Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]),
+    Config3 = stop_rename_start(Config2, Node3, [Node3, flopsy]),
+
+    [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(
+      Config3, nodename),
+    %% Messages published before the renames must survive them.
+    consume_all(Config3,
+      [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+    {save_config, Config3}.
+
+%% Big bang rename of a cluster, Node1 should do a primary rename.
+%% Map is a flat [Old1, New1, Old2, New2, ...] list, consumed pairwise
+%% by update_config_after_rename/2.
+rename_cluster_big_bang(Config) ->
+    [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    publish_all(Config,
+      [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Node3),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Node2),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+
+    Map = [Node1, jessica, Node2, hazel, Node3, flopsy],
+    Config1 = rename_node(Config,  Node1, Map),
+    Config2 = rename_node(Config1, Node2, Map),
+    Config3 = rename_node(Config2, Node3, Map),
+
+    [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(
+      Config3, nodename),
+    ok = rabbit_ct_broker_helpers:start_node(Config3, Jessica),
+    ok = rabbit_ct_broker_helpers:start_node(Config3, Hazel),
+    ok = rabbit_ct_broker_helpers:start_node(Config3, Flopsy),
+
+    consume_all(Config3,
+      [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+    {save_config, Config3}.
+
+%% Here we test that Node1 copes with things being renamed around it:
+%% only Node1 and Node2 are renamed; Node3 keeps its name.
+partial_one_by_one(Config) ->
+    [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    publish_all(Config,
+      [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+    Config1 = stop_rename_start(Config,  Node1, [Node1, jessica]),
+    Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]),
+
+    [Jessica, Hazel, Node3] = rabbit_ct_broker_helpers:get_node_configs(
+      Config2, nodename),
+    consume_all(Config2,
+      [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Node3, <<"3">>}]),
+    {save_config, Config2}.
+
+%% Here we test that Node1 copes with things being renamed around it.
+%% Big-bang variant: Node1 itself is never renamed (Map only covers
+%% Node2 and Node3).
+partial_big_bang(Config) ->
+    [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    publish_all(Config,
+      [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Node3),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Node2),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+
+    Map = [Node2, hazel, Node3, flopsy],
+    Config1 = rename_node(Config,  Node2, Map),
+    Config2 = rename_node(Config1, Node3, Map),
+
+    [Node1, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(Config2,
+      nodename),
+    ok = rabbit_ct_broker_helpers:start_node(Config2, Node1),
+    ok = rabbit_ct_broker_helpers:start_node(Config2, Hazel),
+    ok = rabbit_ct_broker_helpers:start_node(Config2, Flopsy),
+
+    consume_all(Config2,
+      [{Node1, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+    {save_config, Config2}.
+
+% XXX %% We should be able to specify the -n parameter on ctl with either
+% XXX %% the before or after name for the local node (since in real cases
+% XXX %% one might want to invoke the command before or after the hostname
+% XXX %% has changed) - usually we test before so here we test after.
+% XXX post_change_nodename([Node1, _Bigwig]) ->
+% XXX     publish(Node1, <<"Node1">>),
+% XXX
+% XXX     Bugs1    = rabbit_test_configs:stop_node(Node1),
+% XXX     Bugs2    = [{nodename, jessica} | proplists:delete(nodename, Bugs1)],
+% XXX     Jessica0 = rename_node(Bugs2, jessica, [Node1, jessica]),
+% XXX     Jessica  = rabbit_test_configs:start_node(Jessica0),
+% XXX
+% XXX     consume(Jessica, <<"Node1">>),
+% XXX     stop_all([Jessica]),
+% XXX     ok.
+
+%% If we invoke rename but the node name does not actually change, we
+%% should roll back: the node restarts under its old name and still
+%% holds the message.
+abortive_rename(Config) ->
+    Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    publish(Config, Node1,  <<"Node1">>),
+
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+    _Config1 = rename_node(Config, Node1, [Node1, jessica]),
+    ok = rabbit_ct_broker_helpers:start_node(Config, Node1),
+
+    consume(Config, Node1, <<"Node1">>),
+    ok.
+
+%% And test some ways the command can fail.
+rename_fail(Config) ->
+    [Node1, Node2] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+    %% Rename from a node that does not exist
+    ok = rename_node_fail(Config, Node1, [bugzilla, jessica]),
+    %% Rename to a node which does
+    ok = rename_node_fail(Config, Node1, [Node1, Node2]),
+    %% Rename two nodes to the same thing
+    ok = rename_node_fail(Config, Node1, [Node1, jessica, Node2, jessica]),
+    %% Rename while impersonating a node not in the cluster
+    Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Node1,
+      {nodename, 'rabbit@localhost'}),
+    ok = rename_node_fail(Config1, Node1, [Node1, jessica]),
+    ok.
+
+%% A second rename of an already-renamed (and not restarted) node
+%% must fail.
+rename_twice_fail(Config) ->
+    Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+    Config1 = rename_node(Config, Node1, [Node1, indecisive]),
+    %% NOTE(review): the failing rename is attempted against the
+    %% pre-rename Config rather than Config1 — confirm this is
+    %% intentional (Config1 already maps Node1 to 'indecisive').
+    ok = rename_node_fail(Config, Node1, [indecisive, jessica]),
+    {save_config, Config1}.
+
+%% ----------------------------------------------------------------------------
+
+%% Stop a node, rename it according to Map, and start it again under
+%% the (possibly) new name. Returns the updated config.
+stop_rename_start(Config, Nodename, Map) ->
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Nodename),
+    Config1 = rename_node(Config, Nodename, Map),
+    ok = rabbit_ct_broker_helpers:start_node(Config1, Nodename),
+    Config1.
+
+%% Rename that is expected to succeed; crashes on error.
+rename_node(Config, Nodename, Map) ->
+    {ok, Config1} = do_rename_node(Config, Nodename, Map),
+    Config1.
+
+%% Rename that is expected to fail; crashes if it succeeds.
+rename_node_fail(Config, Nodename, Map) ->
+    error = do_rename_node(Config, Nodename, Map),
+    ok.
+
+%% Run `rabbitmqctl rename_cluster_node Old1 New1 ...` on Nodename.
+%% Short names in Map (no '@') are first qualified to name@localhost;
+%% on success the config is updated with the new node names.
+do_rename_node(Config, Nodename, Map) ->
+    Map1 = [
+      begin
+          NStr = atom_to_list(N),
+          case lists:member($@, NStr) of
+              true  -> N;
+              false -> rabbit_nodes:make({NStr, "localhost"})
+          end
+      end
+      || N <- Map
+    ],
+    Ret = rabbit_ct_broker_helpers:rabbitmqctl(Config, Nodename,
+      ["rename_cluster_node" | Map1]),
+    case Ret of
+        {ok, _} ->
+            Config1 = update_config_after_rename(Config, Map1),
+            {ok, Config1};
+        {error, _, _} ->
+            error
+    end.
+
+%% Consume the flat [Old, New, ...] map pairwise, pointing each old
+%% node's config entry at its new name.
+update_config_after_rename(Config, [Old, New | Rest]) ->
+    Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Old,
+      {nodename, New}),
+    update_config_after_rename(Config1, Rest);
+update_config_after_rename(Config, []) ->
+    Config.
+
+%% Publish one persistent message (payload = queue name) to a durable
+%% queue on Node, waiting for publisher confirms so the message is on
+%% disk before the test proceeds.
+publish(Config, Node, Q) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}),
+    amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props   = #'P_basic'{delivery_mode = 2},
+                                payload = Q}),
+    amqp_channel:wait_for_confirms(Ch),
+    rabbit_ct_client_helpers:close_channels_and_connection(Config, Node).
+
+%% Fetch one message from Q on Node and assert its payload equals the
+%% queue name (i.e. the message published by publish/3 survived).
+consume(Config, Node, Q) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+    amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}),
+    {#'basic.get_ok'{}, #amqp_msg{payload = Q}} =
+        amqp_channel:call(Ch, #'basic.get'{queue = Q}),
+    rabbit_ct_client_helpers:close_channels_and_connection(Config, Node).
+
+
+publish_all(Config, Nodes) ->
+    [publish(Config, Node, Key) || {Node, Key} <- Nodes].
+
+consume_all(Config, Nodes) ->
+    [consume(Config, Node, Key) || {Node, Key} <- Nodes].
+
+%% Replace the nodename entry in a node's config proplist.
+%% NOTE(review): no callers in the visible portion of this file —
+%% possibly left over from an older test harness.
+set_node(Nodename, Cfg) ->
+    [{nodename, Nodename} | proplists:delete(nodename, Cfg)].
diff --git a/rabbitmq-server/test/clustering_management_SUITE.erl b/rabbitmq-server/test/clustering_management_SUITE.erl
new file mode 100644 (file)
index 0000000..00ddfa4
--- /dev/null
@@ -0,0 +1,728 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+%% Common Test suite for cluster management operations:
+%% join/forget/reset/update_cluster_nodes, node-type changes,
+%% force_boot and status reporting.
+-module(clustering_management_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+%% export_all keeps the testcase functions visible to Common Test.
+-compile(export_all).
+
+%% Poll interval (ms) for retry loops.
+-define(LOOP_RECURSION_DELAY, 100).
+
+all() ->
+    [
+      {group, unclustered},
+      {group, clustered}
+    ].
+
+groups() ->
+    [
+      {unclustered, [], [
+          {cluster_size_2, [], [
+              erlang_config
+            ]},
+          {cluster_size_3, [], [
+              join_and_part_cluster,
+              join_cluster_bad_operations,
+              join_to_start_interval,
+              forget_cluster_node,
+              change_cluster_node_type,
+              change_cluster_when_node_offline,
+              update_cluster_nodes,
+              force_reset_node
+            ]}
+        ]},
+      {clustered, [], [
+          {cluster_size_2, [], [
+              forget_removes_things,
+              reset_removes_things,
+              forget_offline_removes_things,
+              force_boot,
+              status_with_alarm
+            ]},
+          {cluster_size_4, [], [
+              forget_promotes_offline_slave
+            ]}
+        ]}
+    ].
+
+suite() ->
+    [
+      %% If a test hangs, no need to wait for 30 minutes.
+      {timetrap, {minutes, 5}}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(unclustered, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(clustered, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(cluster_size_2, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]);
+init_per_group(cluster_size_4, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 4}]).
+
+end_per_group(_, Config) ->
+    Config.
+
+%% Give each testcase its own node-name suffix and a disjoint TCP port
+%% range (offset by testcase number x cluster size) so testcases can
+%% run without colliding.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = ?config(rmq_nodes_count, Config),
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% NOTE(review): assert_cluster_status/2 is defined outside this chunk;
+%% the status tuples appear to be {AllNodes, DiscNodes, RunningNodes} —
+%% confirm against the helper's definition.
+join_and_part_cluster(Config) ->
+    [Rabbit, Hare, Bunny] = cluster_members(Config),
+    assert_not_clustered(Rabbit),
+    assert_not_clustered(Hare),
+    assert_not_clustered(Bunny),
+
+    stop_join_start(Rabbit, Bunny),
+    assert_clustered([Rabbit, Bunny]),
+
+    %% Third argument 'true' joins Hare as a RAM node.
+    stop_join_start(Hare, Bunny, true),
+    assert_cluster_status(
+      {[Bunny, Hare, Rabbit], [Bunny, Rabbit], [Bunny, Hare, Rabbit]},
+      [Rabbit, Hare, Bunny]),
+
+    %% Allow clustering with already clustered node
+    ok = stop_app(Rabbit),
+    {ok, already_member} = join_cluster(Rabbit, Hare),
+    ok = start_app(Rabbit),
+
+    stop_reset_start(Rabbit),
+    assert_not_clustered(Rabbit),
+    assert_cluster_status({[Bunny, Hare], [Bunny], [Bunny, Hare]},
+                          [Hare, Bunny]),
+
+    stop_reset_start(Hare),
+    assert_not_clustered(Hare),
+    assert_not_clustered(Bunny).
+
+join_cluster_bad_operations(Config) ->
+    [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+    %% Non-existent node
+    ok = stop_app(Rabbit),
+    assert_failure(fun () -> join_cluster(Rabbit, non@existant) end),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Rabbit),
+
+    %% Trying to cluster with mnesia running
+    assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
+    assert_not_clustered(Rabbit),
+
+    %% Trying to cluster the node with itself
+    ok = stop_app(Rabbit),
+    assert_failure(fun () -> join_cluster(Rabbit, Rabbit) end),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Rabbit),
+
+    %% Do not let the node leave the cluster or reset if it's the only
+    %% ram node
+    stop_join_start(Hare, Rabbit, true),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+                          [Rabbit, Hare]),
+    ok = stop_app(Hare),
+    assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
+    assert_failure(fun () -> reset(Rabbit) end),
+    ok = start_app(Hare),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+                          [Rabbit, Hare]),
+
+    %% Cannot start RAM-only node first
+    ok = stop_app(Rabbit),
+    ok = stop_app(Hare),
+    assert_failure(fun () -> start_app(Hare) end),
+    ok = start_app(Rabbit),
+    ok = start_app(Hare),
+    ok.
+
+%% This tests that the nodes in the cluster are notified immediately of a node
+%% join, and not just after the app is started.
+join_to_start_interval(Config) ->
+    [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+    ok = stop_app(Rabbit),
+    ok = join_cluster(Rabbit, Hare),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+                          [Rabbit, Hare]),
+    ok = start_app(Rabbit),
+    assert_clustered([Rabbit, Hare]).
+
+forget_cluster_node(Config) ->
+    [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+    %% Trying to remove a node not in the cluster should fail
+    assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),
+
+    stop_join_start(Rabbit, Hare),
+    assert_clustered([Rabbit, Hare]),
+
+    %% Trying to remove an online node should fail
+    assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),
+
+    ok = stop_app(Rabbit),
+    %% We're passing the --offline flag, but Hare is online
+    assert_failure(fun () -> forget_cluster_node(Hare, Rabbit, true) end),
+    %% Removing some non-existent node will fail
+    assert_failure(fun () -> forget_cluster_node(Hare, non@existant) end),
+    ok = forget_cluster_node(Hare, Rabbit),
+    assert_not_clustered(Hare),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+                          [Rabbit]),
+
+    %% Now we can't start Rabbit since it thinks that it's still in the cluster
+    %% with Hare, while Hare disagrees.
+    assert_failure(fun () -> start_app(Rabbit) end),
+
+    ok = reset(Rabbit),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Rabbit),
+
+    %% Now we remove Rabbit from an offline node.
+    stop_join_start(Bunny, Hare),
+    stop_join_start(Rabbit, Hare),
+    assert_clustered([Rabbit, Hare, Bunny]),
+    ok = stop_app(Hare),
+    ok = stop_app(Rabbit),
+    ok = stop_app(Bunny),
+    %% This is fine but we need the flag
+    assert_failure(fun () -> forget_cluster_node(Hare, Bunny) end),
+    %% Also fails because hare node is still running
+    assert_failure(fun () -> forget_cluster_node(Hare, Bunny, true) end),
+    %% But this works
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
+      ["forget_cluster_node", "--offline", Bunny]),
+    ok = rabbit_ct_broker_helpers:start_node(Config, Hare),
+    ok = start_app(Rabbit),
+    %% Bunny still thinks its clustered with Rabbit and Hare
+    assert_failure(fun () -> start_app(Bunny) end),
+    ok = reset(Bunny),
+    ok = start_app(Bunny),
+    assert_not_clustered(Bunny),
+    assert_clustered([Rabbit, Hare]).
+
+%% Forgetting a node must remove its durable entities from the cluster.
+forget_removes_things(Config) ->
+    test_removes_things(Config, fun (R, H) -> ok = forget_cluster_node(H, R) end).
+
+%% Resetting a node must likewise remove its durable entities.
+reset_removes_things(Config) ->
+    test_removes_things(Config, fun (R, _H) -> ok = reset(R) end).
+
+%% Shared body: declare an unmirrored queue on Rabbit, lose Rabbit via
+%% LoseRabbit/2, then check the queue name is reusable on Hare.
+test_removes_things(Config, LoseRabbit) ->
+    Unmirrored = <<"unmirrored-queue">>,
+    [Rabbit, Hare] = cluster_members(Config),
+    RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+    declare(RCh, Unmirrored),
+    ok = stop_app(Rabbit),
+
+    %% Passive-style declare on Hare fails with 404 while the queue's
+    %% home node is down.
+    HCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
+    {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+        (catch declare(HCh, Unmirrored)),
+
+    ok = LoseRabbit(Rabbit, Hare),
+    HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
+    declare(HCh2, Unmirrored),
+    ok.
+
+%% Forgetting a node with --offline (all nodes down) must still remove
+%% the forgotten node's queues, exchanges and bindings.
+forget_offline_removes_things(Config) ->
+    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    Unmirrored = <<"unmirrored-queue">>,
+    X = <<"X">>,
+    RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+    declare(RCh, Unmirrored),
+
+    %% auto_delete exchange bound to the queue: it should go away once
+    %% its only binding is removed with the forgotten node.
+    amqp_channel:call(RCh, #'exchange.declare'{durable     = true,
+                                               exchange    = X,
+                                               auto_delete = true}),
+    amqp_channel:call(RCh, #'queue.bind'{queue    = Unmirrored,
+                                         exchange = X}),
+    ok = rabbit_ct_broker_helpers:stop_broker(Config, Rabbit),
+
+    HCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
+    {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+        (catch declare(HCh, Unmirrored)),
+
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
+    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
+      ["forget_cluster_node", "--offline", Rabbit]),
+    ok = rabbit_ct_broker_helpers:start_node(Config, Hare),
+
+    HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
+    declare(HCh2, Unmirrored),
+    %% The exchange must be gone: passive declare gets a 404.
+    {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
+        (catch amqp_channel:call(HCh2,#'exchange.declare'{durable     = true,
+                                                          exchange    = X,
+                                                          auto_delete = true,
+                                                          passive     = true})),
+    ok.
+
+%% Forgetting a dead master must promote the recoverable (offline)
+%% slave that still holds the message, not a running but empty slave.
+forget_promotes_offline_slave(Config) ->
+    [A, B, C, D] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    Q = <<"mirrored-queue">>,
+    declare(ACh, Q),
+    set_ha_policy(Config, Q, A, [B, C]),
+    set_ha_policy(Config, Q, A, [C, D]), %% Test add and remove from recoverable_slaves
+
+    %% Publish and confirm
+    amqp_channel:call(ACh, #'confirm.select'{}),
+    amqp_channel:cast(ACh, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
+    amqp_channel:wait_for_confirms(ACh),
+
+    %% We kill nodes rather than stop them in order to make sure
+    %% that we aren't dependent on anything that happens as they shut
+    %% down (see bug 26467).
+    ok = rabbit_ct_broker_helpers:kill_node(Config, D),
+    ok = rabbit_ct_broker_helpers:kill_node(Config, C),
+    ok = rabbit_ct_broker_helpers:kill_node(Config, B),
+    ok = rabbit_ct_broker_helpers:kill_node(Config, A),
+
+    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, C,
+      ["force_boot"]),
+
+    ok = rabbit_ct_broker_helpers:start_node(Config, C),
+
+    %% We should now have the following dramatis personae:
+    %% A - down, master
+    %% B - down, used to be slave, no longer is, never had the message
+    %% C - running, should be slave, but has wiped the message on restart
+    %% D - down, recoverable slave, contains message
+    %%
+    %% So forgetting A should offline-promote the queue to D, keeping
+    %% the message.
+
+    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, C,
+      ["forget_cluster_node", A]),
+
+    ok = rabbit_ct_broker_helpers:start_node(Config, D),
+    DCh2 = rabbit_ct_client_helpers:open_channel(Config, D),
+    #'queue.declare_ok'{message_count = 1} = declare(DCh2, Q),
+    ok.
+
+%% Pin queue Q to an explicit nodes-mode HA policy and wait until the
+%% mirrors actually match the requested master/slaves.
+set_ha_policy(Config, Q, Master, Slaves) ->
+    Nodes = [list_to_binary(atom_to_list(N)) || N <- [Master | Slaves]],
+    rabbit_ct_broker_helpers:set_ha_policy(Config, Master, Q,
+      {<<"nodes">>, Nodes}),
+    await_slaves(Q, Master, Slaves).
+
+%% Poll (every 100ms) until Q's actual master and slave pids live on
+%% exactly the expected nodes. No timeout here; the suite timetrap
+%% bounds the wait.
+await_slaves(Q, Master, Slaves) ->
+    {ok, #amqqueue{pid        = MPid,
+                   slave_pids = SPids}} =
+        rpc:call(Master, rabbit_amqqueue, lookup,
+                 [rabbit_misc:r(<<"/">>, queue, Q)]),
+    ActMaster = node(MPid),
+    ActSlaves = lists:usort([node(P) || P <- SPids]),
+    case {Master, lists:usort(Slaves)} of
+        {ActMaster, ActSlaves} -> ok;
+        _                      -> timer:sleep(100),
+                                  await_slaves(Q, Master, Slaves)
+    end.
+
+%% force_boot must fail while the node runs, and must allow a node that
+%% otherwise refuses to boot (last-to-stop rule) to start.
+force_boot(Config) ->
+    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    {error, _, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+      ["force_boot"]),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
+    {error, _} = rabbit_ct_broker_helpers:start_node(Config, Rabbit),
+    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+      ["force_boot"]),
+    ok = rabbit_ct_broker_helpers:start_node(Config, Rabbit),
+    ok.
+
+change_cluster_node_type(Config) ->
+    [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+    %% Trying to change the ram node when not clustered should always fail
+    ok = stop_app(Rabbit),
+    assert_failure(fun () -> change_cluster_node_type(Rabbit, ram) end),
+    assert_failure(fun () -> change_cluster_node_type(Rabbit, disc) end),
+    ok = start_app(Rabbit),
+
+    ok = stop_app(Rabbit),
+    join_cluster(Rabbit, Hare),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+                          [Rabbit, Hare]),
+    change_cluster_node_type(Rabbit, ram),
+    assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]},
+                          [Rabbit, Hare]),
+    change_cluster_node_type(Rabbit, disc),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+                          [Rabbit, Hare]),
+    change_cluster_node_type(Rabbit, ram),
+    ok = start_app(Rabbit),
+    assert_cluster_status({[Rabbit, Hare], [Hare], [Hare, Rabbit]},
+                          [Rabbit, Hare]),
+
+    %% Changing to ram when you're the only ram node should fail
+    ok = stop_app(Hare),
+    assert_failure(fun () -> change_cluster_node_type(Hare, ram) end),
+    ok = start_app(Hare).
+
+change_cluster_when_node_offline(Config) ->
+    [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+    %% Cluster the three nodes
+    stop_join_start(Rabbit, Hare),
+    assert_clustered([Rabbit, Hare]),
+
+    stop_join_start(Bunny, Hare),
+    assert_clustered([Rabbit, Hare, Bunny]),
+
+    %% Bring down Rabbit, and remove Bunny from the cluster while
+    %% Rabbit is offline
+    ok = stop_app(Rabbit),
+    ok = stop_app(Bunny),
+    ok = reset(Bunny),
+    assert_cluster_status({[Bunny], [Bunny], []}, [Bunny]),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Hare]),
+    %% Rabbit was offline for Bunny's departure, so it still sees the
+    %% old three-node membership.
+    assert_cluster_status(
+      {[Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny], [Hare, Bunny]}, [Rabbit]),
+
+    %% Bring Rabbit back up
+    ok = start_app(Rabbit),
+    assert_clustered([Rabbit, Hare]),
+    ok = start_app(Bunny),
+    assert_not_clustered(Bunny),
+
+    %% Now the same, but Rabbit is a RAM node, and we bring up Bunny
+    %% before
+    ok = stop_app(Rabbit),
+    ok = change_cluster_node_type(Rabbit, ram),
+    ok = start_app(Rabbit),
+    stop_join_start(Bunny, Hare),
+    assert_cluster_status(
+      {[Rabbit, Hare, Bunny], [Hare, Bunny], [Rabbit, Hare, Bunny]},
+      [Rabbit, Hare, Bunny]),
+    ok = stop_app(Rabbit),
+    ok = stop_app(Bunny),
+    ok = reset(Bunny),
+    ok = start_app(Bunny),
+    assert_not_clustered(Bunny),
+    assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]}, [Hare]),
+    assert_cluster_status(
+      {[Rabbit, Hare, Bunny], [Hare, Bunny], [Hare, Bunny]},
+      [Rabbit]),
+    ok = start_app(Rabbit),
+    assert_cluster_status({[Rabbit, Hare], [Hare], [Rabbit, Hare]},
+                          [Rabbit, Hare]),
+    assert_not_clustered(Bunny).
+
+%% update_cluster_nodes lets a stopped node learn the cluster's new
+%% membership from a surviving member after its original contact node
+%% has been reset.
+update_cluster_nodes(Config) ->
+    [Rabbit, Hare, Bunny] = cluster_members(Config),
+
+    %% Mnesia is running...
+    assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
+
+    ok = stop_app(Rabbit),
+    ok = join_cluster(Rabbit, Hare),
+    ok = stop_app(Bunny),
+    ok = join_cluster(Bunny, Hare),
+    ok = start_app(Bunny),
+    stop_reset_start(Hare),
+    assert_failure(fun () -> start_app(Rabbit) end),
+    %% Bogus node
+    assert_failure(fun () -> update_cluster_nodes(Rabbit, non@existant) end),
+    %% Inconsistent node
+    assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
+    ok = update_cluster_nodes(Rabbit, Bunny),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Hare),
+    assert_clustered([Rabbit, Bunny]).
+
+%% Clustering configured via the {cluster_nodes, {Nodes, NodeType}}
+%% application environment rather than rabbitmqctl; also checks that
+%% malformed values make the node refuse to start.
+erlang_config(Config) ->
+    [Rabbit, Hare] = cluster_members(Config),
+
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[Rabbit], disc}]),
+    ok = start_app(Hare),
+    assert_clustered([Rabbit, Hare]),
+
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[Rabbit], ram}]),
+    ok = start_app(Hare),
+    assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
+                          [Rabbit, Hare]),
+
+    %% Check having a stop_app'ed node around doesn't break completely.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = stop_app(Rabbit),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[Rabbit], disc}]),
+    ok = start_app(Hare),
+    ok = start_app(Rabbit),
+    assert_not_clustered(Hare),
+    assert_not_clustered(Rabbit),
+
+    %% We get a warning but we start anyway
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[non@existent], disc}]),
+    ok = start_app(Hare),
+    assert_not_clustered(Hare),
+    assert_not_clustered(Rabbit),
+
+    %% If we use a legacy config file, the node fails to start.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, [Rabbit]]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    %% If we use an invalid node name, the node fails to start.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {["Mike's computer"], disc}]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    %% If we use an invalid node type, the node fails to start.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, {[Rabbit], blue}]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    %% If we use an invalid cluster_nodes conf, the node fails to start.
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, true]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit),
+
+    ok = stop_app(Hare),
+    ok = reset(Hare),
+    ok = rpc:call(Hare, application, set_env,
+                  [rabbit, cluster_nodes, "Yes, please"]),
+    assert_failure(fun () -> start_app(Hare) end),
+    assert_not_clustered(Rabbit).
+
+%% force_reset wipes Rabbit's cluster state unilaterally: Hare still
+%% believes Rabbit is a member, while Rabbit itself reports standalone
+%% status; update_cluster_nodes lets Rabbit rejoin afterwards.
+force_reset_node(Config) ->
+    [Rabbit, Hare, _Bunny] = cluster_members(Config),
+
+    stop_join_start(Rabbit, Hare),
+    stop_app(Rabbit),
+    force_reset(Rabbit),
+    %% Hare thinks that Rabbit is still clustered...
+    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
+                          [Hare]),
+    %% ...but it isn't
+    assert_cluster_status({[Rabbit], [Rabbit], []}, [Rabbit]),
+    %% We can rejoin Rabbit and Hare
+    update_cluster_nodes(Rabbit, Hare),
+    start_app(Rabbit),
+    assert_clustered([Rabbit, Hare]).
+
+%% Raises a memory alarm on Rabbit and a disk alarm on Hare, then
+%% checks that `cluster_status' run on either node reports both alarms.
+status_with_alarm(Config) ->
+    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+
+    %% Given: an alarm is raised on each node.
+    rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+      ["set_vm_memory_high_watermark", "0.000000001"]),
+    rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
+      ["set_disk_free_limit", "2048G"]),
+
+    %% When: we ask for cluster status.
+    {ok, S} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
+      ["cluster_status"]),
+    {ok, R} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
+      ["cluster_status"]),
+
+    %% Then: both nodes have printed alarm information for each other.
+    ok = alarm_information_on_each_node(S, Rabbit, Hare),
+    ok = alarm_information_on_each_node(R, Rabbit, Hare).
+
+
+%% ----------------------------------------------------------------------------
+%% Internal utils
+
+%% Node names of all brokers started for this testcase, in config order.
+cluster_members(Config) ->
+    rabbit_ct_broker_helpers:get_node_configs(Config, nodename).
+
+%% Waits until every node in Nodes reports the given {All, Disc,
+%% Running} cluster status; the expectation is sorted first so the
+%% caller's node ordering does not matter.
+assert_cluster_status(Status0, Nodes) ->
+    Status = {AllNodes, _, _} = sort_cluster_status(Status0),
+    wait_for_cluster_status(Status, AllNodes, Nodes).
+
+%% Polls every ?LOOP_RECURSION_DELAY ms within a total budget of 10s.
+wait_for_cluster_status(Status, AllNodes, Nodes) ->
+    Max = 10000 / ?LOOP_RECURSION_DELAY,
+    wait_for_cluster_status(0, Max, Status, AllNodes, Nodes).
+
+%% Gives up with a descriptive error once the retry budget is spent.
+wait_for_cluster_status(N, Max, Status, _AllNodes, Nodes) when N >= Max ->
+    erlang:error({cluster_status_max_tries_failed,
+                  [{nodes, Nodes},
+                   {expected_status, Status},
+                   {max_tried, Max}]});
+wait_for_cluster_status(N, Max, Status, AllNodes, Nodes) ->
+    case lists:all(fun (Node) ->
+                            verify_status_equal(Node, Status, AllNodes)
+                   end, Nodes) of
+        true  -> ok;
+        false -> timer:sleep(?LOOP_RECURSION_DELAY),
+                 wait_for_cluster_status(N + 1, Max, Status, AllNodes, Nodes)
+    end.
+
+%% A node matches when its sorted status equals the expectation AND
+%% rabbit_mnesia:is_clustered/0 agrees with it (false is expected only
+%% when the node is the sole cluster member).
+verify_status_equal(Node, Status, AllNodes) ->
+    NodeStatus = sort_cluster_status(cluster_status(Node)),
+    (AllNodes =/= [Node]) =:= rpc:call(Node, rabbit_mnesia, is_clustered, [])
+        andalso NodeStatus =:= Status.
+
+%% {All, Disc, Running} cluster node lists as seen by Node itself.
+cluster_status(Node) ->
+    {rpc:call(Node, rabbit_mnesia, cluster_nodes, [all]),
+     rpc:call(Node, rabbit_mnesia, cluster_nodes, [disc]),
+     rpc:call(Node, rabbit_mnesia, cluster_nodes, [running])}.
+
+%% Normalises a cluster status triple by sorting each node list, so
+%% two statuses can be compared with =:= regardless of ordering.
+sort_cluster_status(Status) ->
+    list_to_tuple([lists:sort(Nodes) || Nodes <- tuple_to_list(Status)]).
+
+%% Asserts Nodes form a cluster where all members are disc nodes and
+%% all are running.
+assert_clustered(Nodes) ->
+    assert_cluster_status({Nodes, Nodes, Nodes}, Nodes).
+
+%% Asserts Node is a standalone, running disc node.
+assert_not_clustered(Node) ->
+    assert_cluster_status({[Node], [Node], [Node]}, [Node]).
+
+%% Runs Fun expecting it to fail; returns the failure reason, or exits
+%% with {expected_failure, Other} if Fun returned anything that does
+%% not match one of the recognised error shapes.
+assert_failure(Fun) ->
+    case catch Fun() of
+        {error, Reason}                -> Reason;
+        {error_string, Reason}         -> Reason;
+        {badrpc, {'EXIT', Reason}}     -> Reason;
+        {badrpc_multi, Reason, _Nodes} -> Reason;
+        Other                          -> exit({expected_failure, Other})
+    end.
+
+%% rabbitmqctl-style application stop/start on a remote node.
+stop_app(Node) ->
+    control_action(stop_app, Node).
+
+start_app(Node) ->
+    control_action(start_app, Node).
+
+%% Joins Node to the cluster of To; the three-argument form passes
+%% `--ram' so the node can join as a RAM node.
+join_cluster(Node, To) ->
+    join_cluster(Node, To, false).
+
+join_cluster(Node, To, Ram) ->
+    control_action(join_cluster, Node, [atom_to_list(To)], [{"--ram", Ram}]).
+
+%% Regular and forced variants of node reset.
+reset(Node) ->
+    control_action(reset, Node).
+
+force_reset(Node) ->
+    control_action(force_reset, Node).
+
+%% Removes Removee from the cluster as seen by Node; `--offline'
+%% controls whether removal is attempted while the target is down.
+forget_cluster_node(Node, Removee, RemoveWhenOffline) ->
+    control_action(forget_cluster_node, Node, [atom_to_list(Removee)],
+                   [{"--offline", RemoveWhenOffline}]).
+
+forget_cluster_node(Node, Removee) ->
+    forget_cluster_node(Node, Removee, false).
+
+%% Wrappers for the corresponding rabbitmqctl cluster commands.
+change_cluster_node_type(Node, Type) ->
+    control_action(change_cluster_node_type, Node, [atom_to_list(Type)]).
+
+update_cluster_nodes(Node, DiscoveryNode) ->
+    control_action(update_cluster_nodes, Node, [atom_to_list(DiscoveryNode)]).
+
+%% Convenience sequences used throughout this suite; each step is
+%% pattern-matched against `ok' so a failure aborts the testcase.
+stop_join_start(Node, ClusterTo, Ram) ->
+    ok = stop_app(Node),
+    ok = join_cluster(Node, ClusterTo, Ram),
+    ok = start_app(Node).
+
+stop_join_start(Node, ClusterTo) ->
+    stop_join_start(Node, ClusterTo, false).
+
+stop_reset_start(Node) ->
+    ok = stop_app(Node),
+    ok = reset(Node),
+    ok = start_app(Node).
+
+%% Invokes rabbit_control_main:action/5 on Node via rpc, mirroring
+%% what rabbitmqctl would do; output goes through io:format/2.
+control_action(Command, Node) ->
+    control_action(Command, Node, [], []).
+
+control_action(Command, Node, Args) ->
+    control_action(Command, Node, Args, []).
+
+control_action(Command, Node, Args, Opts) ->
+    rpc:call(Node, rabbit_control_main, action,
+             [Command, Node, Args, Opts,
+              fun io:format/2]).
+
+%% Declares a durable queue and binds it to amq.fanout; returns the
+%% queue.declare_ok record.
+declare(Ch, Name) ->
+    Res = amqp_channel:call(Ch, #'queue.declare'{durable = true,
+                                                 queue   = Name}),
+    amqp_channel:call(Ch, #'queue.bind'{queue    = Name,
+                                        exchange = <<"amq.fanout">>}),
+    Res.
+
+%% Checks that Output (a cluster_status transcript) lists a memory
+%% alarm for Rabbit and a disk alarm for Hare after the `alarms'
+%% heading.
+alarm_information_on_each_node(Output, Rabbit, Hare) ->
+
+    A = string:str(Output, "alarms"), true = A > 0,
+
+    %% Test that names are printed after `alarms': this counts on
+    %% output with a `{Name, Value}' kind of format, for listing
+    %% alarms, so that we skip any node names in preamble text.
+    Alarms = string:substr(Output, A),
+    RabbitStr = atom_to_list(Rabbit),
+    HareStr = atom_to_list(Hare),
+    match = re:run(Alarms, "\\{'?" ++ RabbitStr ++ "'?,\\[memory\\]\\}",
+      [{capture, none}]),
+    match = re:run(Alarms, "\\{'?" ++ HareStr ++ "'?,\\[disk\\]\\}",
+      [{capture, none}]),
+
+    ok.
diff --git a/rabbitmq-server/test/crashing_queues_SUITE.erl b/rabbitmq-server/test/crashing_queues_SUITE.erl
new file mode 100644 (file)
index 0000000..872b771
--- /dev/null
@@ -0,0 +1,269 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(crashing_queues_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+%% Common Test entry point: a single two-node group.
+all() ->
+    [
+      {group, cluster_size_2}
+    ].
+
+%% All testcases run in the two-node group, sequentially (no group
+%% properties).
+groups() ->
+    [
+      {cluster_size_2, [], [
+          crashing_unmirrored,
+          crashing_mirrored,
+          give_up_after_repeated_crashes
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Standard rabbit_ct_helpers suite bracket.
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% The group name encodes the node count consumed by init_per_testcase.
+init_per_group(cluster_size_2, Config) ->
+    rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 2}
+      ]).
+
+end_per_group(_, Config) ->
+    Config.
+
+%% Boots a fresh broker cluster for each testcase; TCP ports are offset
+%% by testcase number times cluster size so concurrent runs don't clash.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = ?config(rmq_nodes_count, Config),
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+%% Tears down clients before brokers (reverse of setup order).
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Crashes an unmirrored queue on node A: a durable queue is expected
+%% to come back with 1 message and a non-durable one with 0, with no
+%% slaves in either case.
+crashing_unmirrored(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+    ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
+    QName = <<"crashing_unmirrored-q">>,
+    amqp_channel:call(ChA, #'confirm.select'{}),
+    test_queue_failure(A, ChA, ConnB, 1, 0,
+                       #'queue.declare'{queue = QName, durable = true}),
+    test_queue_failure(A, ChA, ConnB, 0, 0,
+                       #'queue.declare'{queue = QName, durable = false}),
+    ok.
+
+%% Same crash scenario with an ha-all policy: the mirrored queue is
+%% expected to retain both messages and keep one slave.
+crashing_mirrored(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<".*">>, <<"all">>),
+    ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+    ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
+    QName = <<"crashing_mirrored-q">>,
+    amqp_channel:call(ChA, #'confirm.select'{}),
+    test_queue_failure(A, ChA, ConnB, 2, 1,
+                       #'queue.declare'{queue = QName, durable = true}),
+    ok.
+
+%% Declares the queue, publishes one transient and one durable message,
+%% kills the queue process while a concurrent declare racer (on
+%% RaceConn) tries to interfere with recovery, then checks the
+%% surviving message and slave counts. The queue is deleted in `after'
+%% so a failing assertion cannot leak it into the next scenario.
+test_queue_failure(Node, Ch, RaceConn, MsgCount, SlaveCount, Decl) ->
+    #'queue.declare_ok'{queue = QName} = amqp_channel:call(Ch, Decl),
+    try
+        publish(Ch, QName, transient),
+        publish(Ch, QName, durable),
+        Racer = spawn_declare_racer(RaceConn, Decl),
+        kill_queue(Node, QName),
+        assert_message_count(MsgCount, Ch, QName),
+        assert_slave_count(SlaveCount, Node, QName),
+        stop_declare_racer(Racer)
+    after
+        amqp_channel:call(Ch, #'queue.delete'{queue = QName})
+    end.
+
+%% Repeatedly crashes a queue until the broker stops restarting it
+%% (state `crashed'; re-declare on the same channel blows up), then
+%% recreates it via node B and finally checks the `down' state once
+%% B (where the fresh queue lives) is stopped.
+give_up_after_repeated_crashes(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+    ChB = rabbit_ct_client_helpers:open_channel(Config, B),
+    QName = <<"give_up_after_repeated_crashes-q">>,
+    amqp_channel:call(ChA, #'confirm.select'{}),
+    amqp_channel:call(ChA, #'queue.declare'{queue   = QName,
+                                            durable = true}),
+    await_state(A, QName, running),
+    publish(ChA, QName, durable),
+    kill_queue_hard(A, QName),
+    {'EXIT', _} = (catch amqp_channel:call(
+                           ChA, #'queue.declare'{queue   = QName,
+                                                 durable = true})),
+    await_state(A, QName, crashed),
+    amqp_channel:call(ChB, #'queue.delete'{queue = QName}),
+    amqp_channel:call(ChB, #'queue.declare'{queue   = QName,
+                                            durable = true}),
+    await_state(A, QName, running),
+
+    %% Since it's convenient, also test absent queue status here.
+    rabbit_ct_broker_helpers:stop_node(Config, B),
+    await_state(A, QName, down),
+    ok.
+
+
+%% Publishes one message with the given delivery mode to the default
+%% exchange (routed directly to QName) and waits for the confirm.
+publish(Ch, QName, DelMode) ->
+    Publish = #'basic.publish'{exchange = <<>>, routing_key = QName},
+    Msg = #amqp_msg{props = #'P_basic'{delivery_mode = del_mode(DelMode)}},
+    amqp_channel:cast(Ch, Publish, Msg),
+    amqp_channel:wait_for_confirms(Ch).
+
+%% AMQP delivery-mode numbers: 1 = non-persistent, 2 = persistent.
+del_mode(Mode) ->
+    case Mode of
+        transient -> 1;
+        durable   -> 2
+    end.
+
+%% Spawns a linked process that re-issues Decl on Conn in a tight loop.
+spawn_declare_racer(Conn, Decl) ->
+    Self = self(),
+    spawn_link(fun() -> declare_racer_loop(Self, Conn, Decl) end).
+
+%% Tells the racer to stop and waits for it to die; monitoring after
+%% the send is fine since a monitor on a dead pid still delivers DOWN.
+stop_declare_racer(Pid) ->
+    Pid ! stop,
+    MRef = erlang:monitor(process, Pid),
+    receive
+        {'DOWN', MRef, process, Pid, _} -> ok
+    end.
+
+%% Tight declare loop; unlinks from Parent on `stop' so the exit of
+%% this process does not take the testcase down with it.
+declare_racer_loop(Parent, Conn, Decl) ->
+    receive
+        stop -> unlink(Parent)
+    after 0 ->
+            %% Catch here because we might happen to catch the queue
+            %% while it is in the middle of recovering and thus
+            %% explode with NOT_FOUND because crashed. Doesn't matter,
+            %% we are only in this loop to try to fool the recovery
+            %% code anyway.
+            try
+                case amqp_connection:open_channel(Conn) of
+                    {ok, Ch} -> amqp_channel:call(Ch, Decl);
+                    closing  -> ok
+                end
+            catch
+                exit:_ ->
+                    ok
+            end,
+            declare_racer_loop(Parent, Conn, Decl)
+    end.
+
+%% Polls (100 ms steps, default 30 s budget) until the queue reports
+%% the desired state; exits with the last observed state on timeout.
+await_state(Node, QName, State) ->
+    await_state(Node, QName, State, 30000).
+
+await_state(Node, QName, State, Time) ->
+    case state(Node, QName) of
+        State ->
+            ok;
+        Other ->
+            case Time of
+                0 -> exit({timeout_awaiting_state, State, Other});
+                _ -> timer:sleep(100),
+                     await_state(Node, QName, State, Time - 100)
+            end
+    end.
+
+%% Queue state as reported by Node, or `undefined' if no queues exist.
+%% NOTE(review): the single-element match assumes QName is the only
+%% queue in vhost "/" -- new callers should confirm that holds.
+state(Node, QName) ->
+    V = <<"/">>,
+    Res = rabbit_misc:r(V, queue, QName),
+    Infos = rpc:call(Node, rabbit_amqqueue, info_all, [V, [name, state]]),
+    case Infos of
+        []                               -> undefined;
+        [[{name,  Res}, {state, State}]] -> State
+    end.
+
+%% Kills the queue repeatedly until the broker gives up restarting it
+%% (queue_pid/2 reports `crashed' instead of a fresh pid).
+kill_queue_hard(Node, QName) ->
+    case kill_queue(Node, QName) of
+        crashed -> ok;
+        _NewPid -> timer:sleep(100),
+                   kill_queue_hard(Node, QName)
+    end.
+
+%% Kills the current queue process and waits for a replacement pid
+%% (or `crashed' if the broker has given up restarting it).
+kill_queue(Node, QName) ->
+    Pid1 = queue_pid(Node, QName),
+    exit(Pid1, boom),
+    await_new_pid(Node, QName, Pid1).
+
+%% Pid of the queue process; for a `crashed' queue we look at the
+%% restarter supervisor to tell "restarting" (still returns the old
+%% pid) apart from "given up" (returns the atom `crashed').
+queue_pid(Node, QName) ->
+    #amqqueue{pid   = QPid,
+              state = State} = lookup(Node, QName),
+    case State of
+        crashed -> case sup_child(Node, rabbit_amqqueue_sup_sup) of
+                       {ok, _}           -> QPid;   %% restarting
+                       {error, no_child} -> crashed %% given up
+                   end;
+        _       -> QPid
+    end.
+
+%% Sole child of the given supervisor on Node, or an error when the
+%% supervisor has no children / does not exist.
+sup_child(Node, Sup) ->
+    case rpc:call(Node, supervisor2, which_children, [Sup]) of
+        [{_, Child, _, _}]              -> {ok, Child};
+        []                              -> {error, no_child};
+        {badrpc, {'EXIT', {noproc, _}}} -> {error, no_sup}
+    end.
+
+%% #amqqueue{} record for QName in vhost "/"; crashes if absent.
+lookup(Node, QName) ->
+    {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+                       [rabbit_misc:r(<<"/">>, queue, QName)]),
+    Q.
+
+%% Busy-waits (10 ms steps) until the queue pid differs from OldPid;
+%% the returned value may also be the atom `crashed' from queue_pid/2.
+await_new_pid(Node, QName, OldPid) ->
+    case queue_pid(Node, QName) of
+        OldPid -> timer:sleep(10),
+                  await_new_pid(Node, QName, OldPid);
+        New    -> New
+    end.
+
+%% Asserts the queue depth via a passive declare (fails the match if
+%% the broker reports a different message_count).
+assert_message_count(Count, Ch, QName) ->
+    #'queue.declare_ok'{message_count = Count} =
+        amqp_channel:call(Ch, #'queue.declare'{queue   = QName,
+                                               passive = true}).
+
+%% Waits for exactly Count slave pids. A slave_pids value of '' counts
+%% as zero; fewer slaves than expected triggers a retry (mirrors may
+%% still be starting), more is an immediate failure.
+assert_slave_count(Count, Node, QName) ->
+    Q = lookup(Node, QName),
+    [{_, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [slave_pids]]),
+    RealCount = case Pids of
+                    '' -> 0;
+                    _  -> length(Pids)
+                end,
+    case RealCount of
+        Count ->
+            ok;
+        _ when RealCount < Count ->
+            timer:sleep(10),
+            assert_slave_count(Count, Node, QName);
+        _ ->
+            exit({too_many_slaves, Count, RealCount})
+    end.
diff --git a/rabbitmq-server/test/dummy_event_receiver.erl b/rabbitmq-server/test/dummy_event_receiver.erl
new file mode 100644 (file)
index 0000000..75db367
--- /dev/null
@@ -0,0 +1,58 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(dummy_event_receiver).
+
+-export([start/3, stop/0]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+         terminate/2, code_change/3]).
+
+-include("rabbit.hrl").
+
+%% Installs this handler on rabbit_event on every node in Nodes;
+%% events whose type is in Types are forwarded to Pid. The {Oks, _}
+%% match asserts add_handler succeeded on each node.
+start(Pid, Nodes, Types) ->
+    Oks = [ok || _ <- Nodes],
+    {Oks, _} = rpc:multicall(Nodes, gen_event, add_handler,
+                             [rabbit_event, ?MODULE, [Pid, Types]]).
+
+%% Removes the handler from the local rabbit_event manager only.
+stop() ->
+    gen_event:delete_handler(rabbit_event, ?MODULE, []).
+
+%%----------------------------------------------------------------------------
+
+%% gen_event callbacks. State is {Pid, Types}: events whose #event.type
+%% is listed in Types are forwarded to Pid unchanged, everything else
+%% is ignored. Calls, info messages and code changes are no-ops.
+init([Pid, Types]) ->
+    {ok, {Pid, Types}}.
+
+handle_call(_Request, State) ->
+    {ok, not_understood, State}.
+
+handle_event(Event = #event{type = Type}, State = {Pid, Types}) ->
+    case lists:member(Type, Types) of
+        true  -> Pid ! Event;
+        false -> ok
+    end,
+    {ok, State}.
+
+handle_info(_Info, State) ->
+    {ok, State}.
+
+terminate(_Arg, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%----------------------------------------------------------------------------
diff --git a/rabbitmq-server/test/dummy_runtime_parameters.erl b/rabbitmq-server/test/dummy_runtime_parameters.erl
new file mode 100644 (file)
index 0000000..d80ec78
--- /dev/null
@@ -0,0 +1,72 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(dummy_runtime_parameters).
+-behaviour(rabbit_runtime_parameter).
+-behaviour(rabbit_policy_validator).
+
+-include("rabbit.hrl").
+
+-export([validate/5, notify/4, notify_clear/3]).
+-export([register/0, unregister/0]).
+-export([validate_policy/1]).
+-export([register_policy_validator/0, unregister_policy_validator/0]).
+
+%----------------------------------------------------------------------------
+
+%% (Un)registers this module as the runtime parameter validator for
+%% the <<"test">> component.
+register() ->
+    rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE).
+
+unregister() ->
+    rabbit_registry:unregister(runtime_parameter, <<"test">>).
+
+%% Validation rules for the <<"test">> component: <<"good">> always
+%% passes, <<"maybe">> only with value <<"good">>, <<"admin">> with no
+%% user or an administrator-tagged user; everything else is rejected.
+validate(_, <<"test">>, <<"good">>,  _Term, _User)      -> ok;
+validate(_, <<"test">>, <<"maybe">>, <<"good">>, _User) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, none)       -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, User) ->
+    case lists:member(administrator, User#user.tags) of
+        true  -> ok;
+        false -> {error, "meh", []}
+    end;
+validate(_, <<"test">>, _, _, _)                        -> {error, "meh", []}.
+
+%% Change notifications are ignored by this dummy implementation.
+notify(_, _, _, _) -> ok.
+notify_clear(_, _, _) -> ok.
+
+%----------------------------------------------------------------------------
+
+%% (Un)registers this module as the policy validator for the
+%% <<"testeven">> and <<"testpos">> policy keys.
+register_policy_validator() ->
+    rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE),
+    rabbit_registry:register(policy_validator, <<"testpos">>,  ?MODULE).
+
+unregister_policy_validator() ->
+    rabbit_registry:unregister(policy_validator, <<"testeven">>),
+    rabbit_registry:unregister(policy_validator, <<"testpos">>).
+
+%% Policy validation used by tests: <<"testeven">> requires an
+%% even-length list, <<"testpos">> a list of positive integers; any
+%% other input is rejected.
+validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) ->
+    case length(Terms) rem 2 of
+        0 -> ok;
+        _ -> {error, "meh", []}
+    end;
+
+validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) ->
+    IsPositiveInt = fun (N) -> is_integer(N) andalso N > 0 end,
+    case lists:all(IsPositiveInt, Terms) of
+        true  -> ok;
+        false -> {error, "meh", []}
+    end;
+
+validate_policy(_) ->
+    {error, "meh", []}.
diff --git a/rabbitmq-server/test/dummy_supervisor2.erl b/rabbitmq-server/test/dummy_supervisor2.erl
new file mode 100644 (file)
index 0000000..9ca3f63
--- /dev/null
@@ -0,0 +1,41 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(dummy_supervisor2).
+
+-behaviour(supervisor2).
+
+-export([
+    start_link/0,
+    init/1
+  ]).
+
+%% Not a real supervisor: spawns a process that traps exits and blocks
+%% until told to stop, standing in for a supervisor2 child in tests.
+start_link() ->
+    Pid = spawn_link(fun () ->
+                             process_flag(trap_exit, true),
+                             receive stop -> ok end
+                     end),
+    {ok, Pid}.
+
+%% supervisor2 specs: with [Timeout], a one_for_one tree holding one
+%% transient supervisor child using the given shutdown timeout; with
+%% [], a simple_one_for_one of temporary workers started via
+%% start_link/0 above.
+init([Timeout]) ->
+    {ok, {{one_for_one, 0, 1},
+          [{test_sup, {supervisor2, start_link,
+                       [{local, ?MODULE}, ?MODULE, []]},
+            transient, Timeout, supervisor, [?MODULE]}]}};
+init([]) ->
+    {ok, {{simple_one_for_one, 0, 1},
+          [{test_worker, {?MODULE, start_link, []},
+            temporary, 1000, worker, [?MODULE]}]}}.
diff --git a/rabbitmq-server/test/dynamic_ha_SUITE.erl b/rabbitmq-server/test/dynamic_ha_SUITE.erl
new file mode 100644 (file)
index 0000000..5872d97
--- /dev/null
@@ -0,0 +1,329 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(dynamic_ha_SUITE).
+
+%% rabbit_tests:test_dynamic_mirroring() is a unit test which should
+%% test the logic of what all the policies decide to do, so we don't
+%% need to exhaustively test that here. What we need to test is that:
+%%
+%% * Going from non-mirrored to mirrored works and vice versa
+%% * Changing policy can add / remove mirrors and change the master
+%% * Adding a node will create a new mirror when there are not enough nodes
+%%   for the policy
+%% * Removing a node will not create a new mirror even if the policy
+%%   logic wants it (since this gives us a good way to lose messages
+%%   on cluster shutdown, by repeated failover to new nodes)
+%%
+%% The first two are change_policy, the last two are change_cluster
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"ha.test">>).
+-define(POLICY, <<"^ha.test$">>). %% " emacs
+-define(VHOST, <<"/">>).
+
+%% Common Test entry point: unclustered and clustered variants.
+all() ->
+    [
+      {group, unclustered},
+      {group, clustered}
+    ].
+
+%% change_cluster needs to start unclustered (it clusters nodes
+%% itself); all other cases run on pre-clustered nodes.
+groups() ->
+    [
+      {unclustered, [], [
+          {cluster_size_5, [], [
+              change_cluster
+            ]}
+        ]},
+      {clustered, [], [
+          {cluster_size_2, [], [
+              vhost_deletion,
+              promote_on_shutdown
+            ]},
+          {cluster_size_3, [], [
+              change_policy,
+              rapid_change
+            ]}
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Standard rabbit_ct_helpers suite bracket.
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% Group names map directly onto broker-helper config: clustered-ness
+%% at the top level, node count at the nested level.
+init_per_group(unclustered, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(clustered, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(cluster_size_2, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]);
+init_per_group(cluster_size_5, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 5}]).
+
+end_per_group(_, Config) ->
+    Config.
+
+%% Boots a fresh broker cluster for each testcase; TCP ports are offset
+%% by testcase number times cluster size so concurrent runs don't clash.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = ?config(rmq_nodes_count, Config),
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+%% Tears down clients before brokers (reverse of setup order).
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Drives a queue through the full policy lifecycle (none -> all ->
+%% nodes -> different nodes -> none -> nodes excluding the master),
+%% checking the master/slave layout after each transition.
+change_policy(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+    %% When we first declare a queue with no policy, it's not HA.
+    amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}),
+    assert_slaves(A, ?QNAME, {A, ''}),
+
+    %% Give it policy "all", it becomes HA and gets all mirrors
+    rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, <<"all">>),
+    assert_slaves(A, ?QNAME, {A, [B, C]}),
+
+    %% Give it policy "nodes", it gets specific mirrors
+    rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+      {<<"nodes">>, [rabbit_misc:atom_to_binary(A),
+                     rabbit_misc:atom_to_binary(B)]}),
+    assert_slaves(A, ?QNAME, {A, [B]}),
+
+    %% Now explicitly change the mirrors
+    rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+      {<<"nodes">>, [rabbit_misc:atom_to_binary(A),
+                     rabbit_misc:atom_to_binary(C)]}),
+    assert_slaves(A, ?QNAME, {A, [C]}, [{A, [B, C]}]),
+
+    %% Clear the policy, and we go back to non-mirrored
+    rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY),
+    assert_slaves(A, ?QNAME, {A, ''}),
+
+    %% Test switching "away" from an unmirrored node
+    rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+      {<<"nodes">>, [rabbit_misc:atom_to_binary(B),
+                     rabbit_misc:atom_to_binary(C)]}),
+    assert_slaves(A, ?QNAME, {A, [B, C]}, [{A, [B]}, {A, [C]}]),
+
+    ok.
+
+%% With an "exactly 4" policy on a 3-node cluster, checks that growing
+%% the cluster adds a mirror (D) and that stopping a mirror lets the
+%% remaining spare node (E) take its place.
+change_cluster(Config) ->
+    [A, B, C, D, E] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    rabbit_ct_broker_helpers:cluster_nodes(Config, [A, B, C]),
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+    amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}),
+    assert_slaves(A, ?QNAME, {A, ''}),
+
+    %% Give it policy exactly 4, it should mirror to all 3 nodes
+    rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+      {<<"exactly">>, 4}),
+    assert_slaves(A, ?QNAME, {A, [B, C]}),
+
+    %% Add D and E, D joins in
+    rabbit_ct_broker_helpers:cluster_nodes(Config, [A, D, E]),
+    assert_slaves(A, ?QNAME, {A, [B, C, D]}),
+
+    %% Remove D, E joins in
+    rabbit_ct_broker_helpers:stop_node(Config, D),
+    assert_slaves(A, ?QNAME, {A, [B, C, E]}),
+
+    ok.
+
+%% Flips the HA policy on and off while a monitored worker performs
+%% 100 declare/publish/consume/delete cycles; fails if the worker dies
+%% abnormally.
+rapid_change(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    {_Pid, MRef} = spawn_monitor(
+                     fun() ->
+                             [rapid_amqp_ops(ACh, I) || I <- lists:seq(1, 100)]
+                     end),
+    rapid_loop(Config, A, MRef),
+    ok.
+
+%% One full queue lifecycle: declare, publish message number I, consume
+%% it back (no_ack), delete. Blocks until the payload round-trips.
+rapid_amqp_ops(Ch, I) ->
+    Payload = list_to_binary(integer_to_list(I)),
+    amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}),
+    amqp_channel:cast(Ch, #'basic.publish'{exchange = <<"">>,
+                                           routing_key = ?QNAME},
+                      #amqp_msg{payload = Payload}),
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue    = ?QNAME,
+                                                no_ack   = true}, self()),
+    receive #'basic.consume_ok'{} -> ok
+    end,
+    receive {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+            ok
+    end,
+    amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}).
+
+%% Sets and clears the HA policy as fast as possible until the
+%% monitored AMQP worker finishes; an abnormal exit fails the test.
+rapid_loop(Config, Node, MRef) ->
+    receive
+        {'DOWN', MRef, process, _Pid, normal} ->
+            ok;
+        {'DOWN', MRef, process, _Pid, Reason} ->
+            exit({amqp_ops_died, Reason})
+    after 0 ->
+            rabbit_ct_broker_helpers:set_ha_policy(Config, Node, ?POLICY,
+              <<"all">>),
+            rabbit_ct_broker_helpers:clear_policy(Config, Node, ?POLICY),
+            rapid_loop(Config, Node, MRef)
+    end.
+
+%% Vhost deletion needs to successfully tear down policies and queues
+%% with policies. At least smoke-test that it doesn't blow up.
+vhost_deletion(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    amqp_channel:call(ACh, #'queue.declare'{queue = <<"vhost_deletion-q">>}),
+    ok = rpc:call(A, rabbit_vhost, delete, [<<"/">>]),
+    ok.
+
+%% With A's restart leaving B holding unsynchronised mirrors: the
+%% `ha-promote-on-shutdown=always' queue promotes on B (losing its 10
+%% messages), while the default-policy queue refuses to promote (404
+%% on B) and keeps all 10 messages once A is back.
+promote_on_shutdown(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.promote">>,
+      <<"all">>, [{<<"ha-promote-on-shutdown">>, <<"always">>}]),
+    rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>,
+      <<"all">>),
+
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    [begin
+         amqp_channel:call(ACh, #'queue.declare'{queue   = Q,
+                                                 durable = true}),
+         rabbit_ct_client_helpers:publish(ACh, Q, 10)
+     end || Q <- [<<"ha.promote.test">>, <<"ha.nopromote.test">>]],
+    ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+    ok = rabbit_ct_broker_helpers:stop_node(Config, A),
+    BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+    #'queue.declare_ok'{message_count = 0} =
+        amqp_channel:call(
+          BCh, #'queue.declare'{queue   = <<"ha.promote.test">>,
+                                durable = true}),
+    ?assertExit(
+       {{shutdown, {server_initiated_close, 404, _}}, _},
+       amqp_channel:call(
+         BCh, #'queue.declare'{queue   = <<"ha.nopromote.test">>,
+                               durable = true})),
+    ok = rabbit_ct_broker_helpers:start_node(Config, A),
+    ACh2 = rabbit_ct_client_helpers:open_channel(Config, A),
+    #'queue.declare_ok'{message_count = 10} =
+        amqp_channel:call(
+          ACh2, #'queue.declare'{queue   = <<"ha.nopromote.test">>,
+                                 durable = true}),
+    ok.
+
+%%----------------------------------------------------------------------------
+
+%% Assert that queue QName (looked up via RPCNode) eventually reaches the
+%% expected {MasterNode, SlaveNodes} layout Exp.  Because mirroring
+%% changes are asynchronous, the previous expected layout (stored in the
+%% process dictionary) is always a permitted intermediate state.
+assert_slaves(RPCNode, QName, Exp) ->
+    assert_slaves(RPCNode, QName, Exp, []).
+
+%% As above, with extra PermittedIntermediate layouts that are tolerated
+%% while waiting for the final layout to materialise.
+assert_slaves(RPCNode, QName, Exp, PermittedIntermediate) ->
+    assert_slaves0(RPCNode, QName, Exp,
+                  [{get(previous_exp_m_node), get(previous_exp_s_nodes)} |
+                   PermittedIntermediate]).
+
+assert_slaves0(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate) ->
+    Q = find_queue(QName, RPCNode),
+    Pid = proplists:get_value(pid, Q),
+    SPids = proplists:get_value(slave_pids, Q),
+    ActMNode = node(Pid),
+    %% '' denotes "no slaves reported" and is compared as-is by equal_list/2.
+    ActSNodes = case SPids of
+                    '' -> '';
+                    _  -> [node(SPid) || SPid <- SPids]
+                end,
+    case ExpMNode =:= ActMNode andalso equal_list(ExpSNodes, ActSNodes) of
+        false ->
+            %% It's an async change, so if nothing has changed let's
+            %% just wait - of course this means if something does not
+            %% change when expected then we time out the test which is
+            %% a bit tedious
+            case [found || {PermMNode, PermSNodes} <- PermittedIntermediate,
+                           PermMNode =:= ActMNode,
+                           equal_list(PermSNodes, ActSNodes)] of
+                [] -> ct:fail("Expected ~p / ~p, got ~p / ~p~nat ~p~n",
+                              [ExpMNode, ExpSNodes, ActMNode, ActSNodes,
+                               get_stacktrace()]);
+                _  -> timer:sleep(100),
+                      assert_slaves0(RPCNode, QName, {ExpMNode, ExpSNodes},
+                                     PermittedIntermediate)
+            end;
+        true ->
+            %% Remember this layout so the next assertion treats it as a
+            %% permitted intermediate state.
+            put(previous_exp_m_node, ExpMNode),
+            put(previous_exp_s_nodes, ExpSNodes),
+            ok
+    end.
+
+%% Order-insensitive list equality, with the atom '' (meaning "no slaves
+%% reported") only ever equal to itself.  Each matched element is removed
+%% from the remainder, so duplicates must match one-for-one.
+equal_list('',    '')   -> true;
+equal_list('',    _Act) -> false;
+equal_list(_Exp,  '')   -> false;
+equal_list([],    [])   -> true;
+equal_list(_Exp,  [])   -> false;
+equal_list([],    _Act) -> false;
+equal_list([H|T], Act)  -> case lists:member(H, Act) of
+                               true  -> equal_list(T, Act -- [H]);
+                               false -> false
+                           end.
+
+%% Fetch the info proplist for queue QName in ?VHOST via an RPC to
+%% RPCNode, polling every 100ms until the queue appears.  Never gives up
+%% on its own - relies on the enclosing test's timeout.
+find_queue(QName, RPCNode) ->
+    Qs = rpc:call(RPCNode, rabbit_amqqueue, info_all, [?VHOST], infinity),
+    case find_queue0(QName, Qs) of
+        did_not_find_queue -> timer:sleep(100),
+                              find_queue(QName, RPCNode);
+        Q -> Q
+    end.
+
+%% Select the single queue whose 'name' matches the ?VHOST resource for
+%% QName; returns did_not_find_queue when absent.
+find_queue0(QName, Qs) ->
+    case [Q || Q <- Qs, proplists:get_value(name, Q) =:=
+                   rabbit_misc:r(?VHOST, queue, QName)] of
+        [R] -> R;
+        []  -> did_not_find_queue
+    end.
+
+%% Capture the current stacktrace (for failure diagnostics) by throwing
+%% and immediately catching a dummy term.
+%% NOTE(review): erlang:get_stacktrace/0 was deprecated in later OTP
+%% releases (removed in OTP 24) - fine for the OTP this package targets,
+%% but confirm if the toolchain is ever upgraded.
+get_stacktrace() ->
+    try
+        throw(e)
+    catch
+        _:e ->
+            erlang:get_stacktrace()
+    end.
diff --git a/rabbitmq-server/test/eager_sync_SUITE.erl b/rabbitmq-server/test/eager_sync_SUITE.erl
new file mode 100644 (file)
index 0000000..93b308b
--- /dev/null
@@ -0,0 +1,278 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(eager_sync_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"ha.two.test">>).
+-define(QNAME_AUTO, <<"ha.auto.test">>).
+-define(MESSAGE_COUNT, 2000).
+
+%% Common Test entry point: all cases run in one non-parallel group,
+%% since each restarts broker nodes and syncs mirrors.
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+      {non_parallel_tests, [], [
+          eager_sync,
+          eager_sync_cancel,
+          eager_sync_auto,
+          eager_sync_auto_on_policy_change,
+          eager_sync_requeue
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+%% Each testcase gets a fresh 3-node cluster with its own port range
+%% (derived from the testcase number so parallel CT runs don't collide),
+%% plus the "two-pos" HA policies installed by the extra setup steps.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = 3,
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, ClusterSize},
+        {rmq_nodes_clustered, true},
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps() ++ [
+        fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1,
+        fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1
+      ]).
+
+%% Tear the cluster down again after every testcase.
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Core eager-sync behaviour: without an explicit sync, messages that
+%% only ever lived on restarted mirrors are lost; with sync(C, ?QNAME)
+%% between restarts they survive; syncing an already-synced queue is a
+%% no-op; and unacknowledged (fetched but not acked) messages are kept.
+eager_sync(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    %% Queue is on AB but not C.
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+    amqp_channel:call(ACh, #'queue.declare'{queue   = ?QNAME,
+                                            durable = true}),
+
+    %% Don't sync, lose messages
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    restart(Config, A),
+    restart(Config, B),
+    rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0),
+
+    %% Sync, keep messages
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    restart(Config, A),
+    ok = sync(C, ?QNAME),
+    restart(Config, B),
+    rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+    %% Check the no-need-to-sync path
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    ok = sync(C, ?QNAME),
+    rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+    %% keep unacknowledged messages
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 2),
+    restart(Config, A),
+    rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 3),
+    sync(C, ?QNAME),
+    restart(Config, B),
+    rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+    ok.
+
+%% Cancelling a sync in progress must leave the queue running (with the
+%% unsynced messages still lost on restart), and cancelling when no sync
+%% is running must be an idempotent {ok, not_syncing}.  The batch size is
+%% forced to 1 on all nodes so the sync is slow enough to cancel.
+eager_sync_cancel(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    %% Queue is on AB but not C.
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+
+    set_app_sync_batch_size(A),
+    set_app_sync_batch_size(B),
+    set_app_sync_batch_size(C),
+
+    amqp_channel:call(ACh, #'queue.declare'{queue   = ?QNAME,
+                                            durable = true}),
+    {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
+    eager_sync_cancel_test2(Config, A, B, C, Ch).
+
+%% Race-tolerant body of eager_sync_cancel: start an async sync, wait
+%% for it to be observed in progress, then cancel it.  If the sync wins
+%% either race and completes first, purge the queue and retry the whole
+%% sequence from scratch.
+eager_sync_cancel_test2(Config, A, B, C, Ch) ->
+    %% Sync then cancel
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    restart(Config, A),
+    set_app_sync_batch_size(A),
+    spawn_link(fun() -> ok = sync_nowait(C, ?QNAME) end),
+    case wait_for_syncing(C, ?QNAME, 1) of
+        ok ->
+            case sync_cancel(C, ?QNAME) of
+                ok ->
+                    wait_for_running(C, ?QNAME),
+                    restart(Config, B),
+                    set_app_sync_batch_size(B),
+                    %% Sync was cancelled, so the messages are gone.
+                    rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0),
+
+                    {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
+                    ok;
+                {ok, not_syncing} ->
+                    %% Damn. Syncing finished between wait_for_syncing/3 and
+                    %% sync_cancel/2 above. Start again.
+                    amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
+                    eager_sync_cancel_test2(Config, A, B, C, Ch)
+            end;
+        synced_already ->
+            %% Damn. Syncing finished before wait_for_syncing/3. Start again.
+            amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
+            eager_sync_cancel_test2(Config, A, B, C, Ch)
+    end.
+
+%% With the auto-sync policy (matched by ?QNAME_AUTO), mirrors re-sync
+%% on their own after each restart and no messages are lost.
+eager_sync_auto(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+    amqp_channel:call(ACh, #'queue.declare'{queue   = ?QNAME_AUTO,
+                                            durable = true}),
+
+    %% Sync automatically, don't lose messages
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
+    restart(Config, A),
+    wait_for_sync(C, ?QNAME_AUTO),
+    restart(Config, B),
+    wait_for_sync(C, ?QNAME_AUTO),
+    rabbit_ct_client_helpers:consume(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
+
+    ok.
+
+%% Changing the policy of an existing queue to ha-sync-mode=automatic
+%% must itself trigger a sync of the out-of-date mirror.
+eager_sync_auto_on_policy_change(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    %% Queue is on AB but not C.
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+    amqp_channel:call(ACh, #'queue.declare'{queue   = ?QNAME,
+                                            durable = true}),
+
+    %% Sync automatically once the policy is changed to tell us to.
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    restart(Config, A),
+    Params = [rabbit_misc:atom_to_binary(N) || N <- [A, B]],
+    rabbit_ct_broker_helpers:set_ha_policy(Config,
+      A, <<"^ha.two.">>, {<<"nodes">>, Params},
+      [{<<"ha-sync-mode">>, <<"automatic">>}]),
+    wait_for_sync(C, ?QNAME),
+
+    ok.
+
+%% Messages rejected with requeue=true - both before and after a sync -
+%% must still be present and consumable afterwards.
+eager_sync_requeue(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    %% Queue is on AB but not C.
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+    amqp_channel:call(ACh, #'queue.declare'{queue   = ?QNAME,
+                                            durable = true}),
+
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, 2),
+    {#'basic.get_ok'{delivery_tag = TagA}, _} =
+        amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
+    {#'basic.get_ok'{delivery_tag = TagB}, _} =
+        amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
+    %% Requeue one message before the sync, one after.
+    amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagA, requeue = true}),
+    restart(Config, B),
+    ok = sync(C, ?QNAME),
+    amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagB, requeue = true}),
+    rabbit_ct_client_helpers:consume(Ch, ?QNAME, 2),
+
+    ok.
+
+restart(Config, Node) ->
+    rabbit_ct_broker_helpers:restart_broker(Config, Node).
+
+%% Start a sync via rabbitmqctl and block until it completes.
+sync(Node, QName) ->
+    case sync_nowait(Node, QName) of
+        ok -> wait_for_sync(Node, QName),
+              ok;
+        R  -> R
+    end.
+
+sync_nowait(Node, QName) -> action(Node, sync_queue, QName).
+sync_cancel(Node, QName) -> action(Node, cancel_sync_queue, QName).
+
+wait_for_sync(Node, QName) ->
+    sync_detection_SUITE:wait_for_sync_status(true, Node, QName).
+
+%% Run a rabbitmqctl queue action against the default vhost.
+action(Node, Action, QName) ->
+    rabbit_ct_broker_helpers:control_action(
+      Action, Node, [binary_to_list(QName)], [{"-p", "/"}]).
+
+%% Look up the amqqueue record for QName on Node via RPC.
+queue(Node, QName) ->
+    QNameRes = rabbit_misc:r(<<"/">>, queue, QName),
+    {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]),
+    Q.
+
+%% Poll until the queue is observed syncing (-> ok), or until it is
+%% running with Target synchronised slaves, meaning the sync already
+%% finished before we saw it (-> synced_already).
+wait_for_syncing(Node, QName, Target) ->
+    case state(Node, QName) of
+        {{syncing, _}, _} -> ok;
+        {running, Target} -> synced_already;
+        _                 -> timer:sleep(100),
+                             wait_for_syncing(Node, QName, Target)
+    end.
+
+%% Poll until the queue state returns to running.
+wait_for_running(Node, QName) ->
+    case state(Node, QName) of
+        {running, _} -> ok;
+        _            -> timer:sleep(100),
+                        wait_for_running(Node, QName)
+    end.
+
+%% {QueueState, NumberOfSynchronisedSlaves} for QName on Node.
+state(Node, QName) ->
+    [{state, State}, {synchronised_slave_pids, Pids}] =
+        rpc:call(Node, rabbit_amqqueue, info,
+                 [queue(Node, QName), [state, synchronised_slave_pids]]),
+    {State, length(Pids)}.
+
+%% eager_sync_cancel needs a batch size that's < ?MESSAGE_COUNT
+%% in order to pass, because a SyncBatchSize >= ?MESSAGE_COUNT will
+%% always finish before the test is able to cancel the sync.
+set_app_sync_batch_size(Node) ->
+    rabbit_ct_broker_helpers:control_action(
+      eval, Node,
+      ["application:set_env(rabbit, mirroring_sync_batch_size, 1)."]).
diff --git a/rabbitmq-server/test/gm_SUITE.erl b/rabbitmq-server/test/gm_SUITE.erl
new file mode 100644 (file)
index 0000000..f5ccf75
--- /dev/null
@@ -0,0 +1,205 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(gm_SUITE).
+
+-behaviour(gm).
+
+-include_lib("common_test/include/ct.hrl").
+
+-include("gm_specs.hrl").
+
+-compile(export_all).
+
+%% Receive a message matching Body within 1s and assert Bool about its
+%% bindings, or throw Error.  Used by all the receive_* helpers below.
+-define(RECEIVE_OR_THROW(Body, Bool, Error),
+        receive Body ->
+                true = Bool,
+                passed
+        after 1000 ->
+                throw(Error)
+        end).
+
+all() ->
+    [
+      join_leave,
+      broadcast,
+      confirmed_broadcast,
+      member_death,
+      receive_in_order
+    ].
+
+%% gm needs mnesia plus rabbit's file_handle_cache and worker_pool
+%% running locally; start them here (unlinked so suite-process exits
+%% don't kill them) and stash their pids for teardown.
+init_per_suite(Config) ->
+    ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)),
+    ok = application:start(mnesia),
+    {ok, FHC} = file_handle_cache:start_link(),
+    unlink(FHC),
+    {ok, WPS} = worker_pool_sup:start_link(),
+    unlink(WPS),
+    rabbit_ct_helpers:set_config(Config, [
+        {file_handle_cache_pid, FHC},
+        {worker_pool_sup_pid, WPS}
+      ]).
+
+%% Shut the helpers down in reverse order of startup.
+end_per_suite(Config) ->
+    exit(?config(worker_pool_sup_pid, Config), shutdown),
+    exit(?config(file_handle_cache_pid, Config), shutdown),
+    ok = application:stop(mnesia),
+    Config.
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+%% Joining and leaving alone must succeed (all the work happens in
+%% with_two_members/1's setup and teardown).
+join_leave(_Config) ->
+    passed = with_two_members(fun (_Pid, _Pid2) -> passed end).
+
+broadcast(_Config) ->
+    passed = do_broadcast(fun gm:broadcast/2).
+
+confirmed_broadcast(_Config) ->
+    passed = do_broadcast(fun gm:confirmed_broadcast/2).
+
+%% Kill a third member and check both survivors are told of its birth
+%% and (after forcing traffic through the group) of its death.
+member_death(_Config) ->
+    passed = with_two_members(
+      fun (Pid, Pid2) ->
+              {ok, Pid3} = gm:start_link(
+                             ?MODULE, ?MODULE, self(),
+                             fun rabbit_misc:execute_mnesia_transaction/1),
+              passed = receive_joined(Pid3, [Pid, Pid2, Pid3],
+                                      timeout_joining_gm_group_3),
+              passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1),
+              passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2),
+
+              unlink(Pid3),
+              exit(Pid3, kill),
+
+              %% Have to do some broadcasts to ensure that all members
+              %% find out about the death.
+              passed = (broadcast_fun(fun gm:confirmed_broadcast/2))(
+                         Pid, Pid2),
+
+              passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1),
+              passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2),
+
+              passed
+      end).
+
+%% Interleave 1000 broadcasts from each of two members and verify every
+%% member receives every sender's stream in the original order.
+receive_in_order(_Config) ->
+    passed = with_two_members(
+      fun (Pid, Pid2) ->
+              Numbers = lists:seq(1,1000),
+              [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end
+               || N <- Numbers],
+              %% Check all four (receiver, sender) combinations.
+              passed = receive_numbers(
+                         Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers),
+              passed = receive_numbers(
+                         Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers),
+              passed = receive_numbers(
+                         Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers),
+              passed = receive_numbers(
+                         Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers),
+              passed
+      end).
+
+do_broadcast(Fun) ->
+    with_two_members(broadcast_fun(Fun)).
+
+%% Returns a two-member test body: Fun(Pid, magic_message) must be
+%% delivered to both members, attributed to sender Pid.
+broadcast_fun(Fun) ->
+    fun (Pid, Pid2) ->
+            ok = Fun(Pid, magic_message),
+            passed = receive_or_throw({msg, Pid, Pid, magic_message},
+                                      timeout_waiting_for_msg),
+            passed = receive_or_throw({msg, Pid2, Pid, magic_message},
+                                      timeout_waiting_for_msg)
+    end.
+
+%% Scaffold for the tests above: create the gm tables, start two group
+%% members (this suite module is the gm callback, posting events back to
+%% self()), verify the join/birth notifications, run Fun(Pid, Pid2),
+%% then leave cleanly and assert no stray messages remain in the inbox.
+with_two_members(Fun) ->
+    ok = gm:create_tables(),
+
+    {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
+                              fun rabbit_misc:execute_mnesia_transaction/1),
+    passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
+
+    {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
+                               fun rabbit_misc:execute_mnesia_transaction/1),
+    passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
+    passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
+
+    passed = Fun(Pid, Pid2),
+
+    ok = gm:leave(Pid),
+    passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1),
+    passed =
+        receive_termination(Pid, normal, timeout_waiting_for_termination_1),
+
+    ok = gm:leave(Pid2),
+    passed =
+        receive_termination(Pid2, normal, timeout_waiting_for_termination_2),
+
+    %% Any leftover message means an unexpected callback fired.
+    receive X -> throw({unexpected_message, X})
+    after 0 -> passed
+    end.
+
+%% Thin wrappers over ?RECEIVE_OR_THROW matching the messages posted by
+%% the gm callbacks at the bottom of this module.
+receive_or_throw(Pattern, Error) ->
+    ?RECEIVE_OR_THROW(Pattern, true, Error).
+
+receive_birth(From, Born, Error) ->
+    ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
+                      ([Born] == Birth) andalso ([] == Death),
+                      Error).
+
+receive_death(From, Died, Error) ->
+    ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
+                      ([] == Birth) andalso ([Died] == Death),
+                      Error).
+
+receive_joined(From, Members, Error) ->
+    %% Membership is compared as a set - ordering is not guaranteed.
+    ?RECEIVE_OR_THROW({joined, From, Members1},
+                      lists:usort(Members) == lists:usort(Members1),
+                      Error).
+
+receive_termination(From, Reason, Error) ->
+    ?RECEIVE_OR_THROW({termination, From, Reason1},
+                      Reason == Reason1,
+                      Error).
+
+%% Expect the numbers from Sender to arrive at Pid in exactly the order
+%% given - this is the ordering guarantee receive_in_order/1 verifies.
+receive_numbers(_Pid, _Sender, _Error, []) ->
+    passed;
+receive_numbers(Pid, Sender, Error, [N | Numbers]) ->
+    ?RECEIVE_OR_THROW({msg, Pid, Sender, M},
+                      M == N,
+                      Error),
+    receive_numbers(Pid, Sender, Error, Numbers).
+
+%% -------------------------------------------------------------------
+%% gm behavior callbacks.
+%% -------------------------------------------------------------------
+
+%% gm behaviour callbacks: each simply forwards the event to the test
+%% process (Pid is the callback argument passed to gm:start_link/4,
+%% i.e. self() of the suite), tagged with the member pid (self() here
+%% is the gm member process).
+joined(Pid, Members) ->
+    Pid ! {joined, self(), Members},
+    ok.
+
+members_changed(Pid, Births, Deaths) ->
+    Pid ! {members_changed, self(), Births, Deaths},
+    ok.
+
+handle_msg(Pid, From, Msg) ->
+    Pid ! {msg, self(), From, Msg},
+    ok.
+
+handle_terminate(Pid, Reason) ->
+    Pid ! {termination, self(), Reason},
+    ok.
diff --git a/rabbitmq-server/test/health_check_SUITE.erl b/rabbitmq-server/test/health_check_SUITE.erl
new file mode 100644 (file)
index 0000000..4d8f56e
--- /dev/null
@@ -0,0 +1,167 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc.  All rights reserved.
+%%
+-module(health_check_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-export([all/0
+        ,groups/0
+        ,init_per_suite/1
+        ,end_per_suite/1
+        ,init_per_testcase/2
+        ,end_per_testcase/2
+        ]).
+
+-export([ignores_remote_dead_channel/1
+        ,detects_local_dead_channel/1
+        ,ignores_remote_dead_queue/1
+        ,detects_local_dead_queue/1
+        ,ignores_remote_alarms/1
+        ,detects_local_alarm/1
+        ,honors_timeout_argument/1
+        ]).
+
+%% All health-check cases run sequentially in one group; each pairs a
+%% local-failure test with its remote-failure counterpart.
+all() ->
+    [{group, all_cases}].
+
+groups() ->
+    [{all_cases, [],
+      [ignores_remote_dead_queue
+      ,detects_local_dead_queue
+      ,ignores_remote_dead_channel
+      ,detects_local_dead_channel
+      ,ignores_remote_alarms
+      ,detects_local_alarm
+      ,honors_timeout_argument
+      ]}].
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% Every testcase runs against a fresh 2-node cluster so a failure
+%% injected on one node (alarm, suspended process) cannot leak into the
+%% next case.
+init_per_testcase(Testcase, Config0) ->
+    rabbit_ct_helpers:testcase_started(Config0, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(
+                Config0, [{rmq_nodes_count, 2},
+                          {rmq_nodes_clustered, true}]),
+    rabbit_ct_helpers:run_steps(Config1,
+                                rabbit_ct_broker_helpers:setup_steps() ++
+                                rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config0) ->
+    %% Prefer the saved config (if a step updated it) for teardown.
+    Config1 = case rabbit_ct_helpers:get_config(Config0, save_config) of
+        undefined -> Config0;
+        C         -> C
+    end,
+    Config2 = rabbit_ct_helpers:run_steps(Config1,
+                                          rabbit_ct_client_helpers:teardown_steps() ++
+                                          rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config2, Testcase).
+
+%%----------------------------------------------------------------------------
+%% Test cases
+%%----------------------------------------------------------------------------
+%% node_health_check on node A must ignore a suspended channel on the
+%% OTHER node B (exit 0)...
+ignores_remote_dead_channel(Config) ->
+    [A, B] = open_channel_and_declare_queue_everywhere(Config),
+    CPid = suspend_single_channel(Config, B),
+    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A, ["-t", "5", "node_health_check"]),
+    resume_sys_process(Config, B, CPid),
+    ok.
+
+%% ...but must time out (exit code 75) when the suspended channel is on
+%% the node being checked.
+detects_local_dead_channel(Config) ->
+    [A|_] = open_channel_and_declare_queue_everywhere(Config),
+    CPid = suspend_single_channel(Config, A),
+    {error, 75, Str} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A, ["-t", "5", "node_health_check"]),
+    {match, _} = re:run(Str, "operation node_health_check.*timed out"),
+    resume_sys_process(Config, A, CPid),
+    ok.
+
+%% Same pair of checks for a suspended queue process instead of a channel.
+ignores_remote_dead_queue(Config) ->
+    [A, B] = open_channel_and_declare_queue_everywhere(Config),
+    QPid = suspend_single_queue(Config, B),
+    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A, ["-t", "5", "node_health_check"]),
+    resume_sys_process(Config, B, QPid),
+    ok.
+
+detects_local_dead_queue(Config) ->
+    [A|_] = open_channel_and_declare_queue_everywhere(Config),
+    QPid = suspend_single_queue(Config, A),
+    {error, 75, Str} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A, ["-t", "5", "node_health_check"]),
+    {match, _} = re:run(Str, "operation node_health_check.*timed out"),
+    resume_sys_process(Config, A, QPid),
+    ok.
+
+%% A resource alarm (absurdly low memory watermark) on the other node is
+%% ignored...
+ignores_remote_alarms(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    rabbit_ct_broker_helpers:rabbitmqctl(Config, B,
+                                         ["set_vm_memory_high_watermark", "0.000000001"]),
+    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A, ["-t", "5", "node_health_check"]),
+    ok.
+
+%% ...but a local one fails the check with exit code 70.
+detects_local_alarm(Config) ->
+    [A|_] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    rabbit_ct_broker_helpers:rabbitmqctl(Config, A,
+                                         ["set_vm_memory_high_watermark", "0.000000001"]),
+    {error, 70, Str} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A, ["-t", "5", "node_health_check"]),
+    {match, _} = re:run(Str, "resource alarm.*in effect"),
+    ok.
+
+%% The "-t 5" timeout must actually be honoured: with a suspended local
+%% queue the check fails after ~5s - not sooner, and not much later
+%% (timer:tc reports microseconds).
+honors_timeout_argument(Config) ->
+    [A|_] = open_channel_and_declare_queue_everywhere(Config),
+    QPid = suspend_single_queue(Config, A),
+
+    case timer:tc(rabbit_ct_broker_helpers, rabbitmqctl, [Config, A, ["-t", "5", "node_health_check"]]) of
+        {TimeSpent, {error, 75, _}} ->
+            if TimeSpent < 5000000 -> exit({too_fast, TimeSpent});
+               TimeSpent > 7000000 -> exit({too_slow, TimeSpent}); %% +2 seconds for rabbitmqctl overhead
+               true -> ok
+            end;
+        {_, Unexpected} ->
+            exit({unexpected, Unexpected})
+    end,
+    resume_sys_process(Config, A, QPid),
+    ok.
+
+%%----------------------------------------------------------------------------
+%% Helpers
+%%----------------------------------------------------------------------------
+%% On every node: open a channel and declare a (server-named) queue, so
+%% each node has at least one channel and one queue process to suspend.
+%% Returns the node name list.
+open_channel_and_declare_queue_everywhere(Config) ->
+    Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    lists:foreach(fun(Node) ->
+                      Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+                      #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{})
+                  end,
+                  Nodes),
+    Nodes.
+
+%% Pick one queue process resident on Node and sys:suspend it so the
+%% health check's queue probe cannot get a reply.  Returns the pid for
+%% later resume_sys_process/3.
+suspend_single_queue(Config, Node) ->
+    [QPid|_] = [rabbit_amqqueue:pid_of(Q) || Q <- rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_amqqueue, list, []),
+                                             Node == node(rabbit_amqqueue:pid_of(Q))],
+    rabbit_ct_broker_helpers:rpc(Config, Node, sys, suspend, [QPid]),
+    QPid.
+
+%% Same, for a channel process local to Node.
+suspend_single_channel(Config, Node) ->
+    [CPid|_] = [Pid || Pid <- rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_channel, list_local, []),
+                       Node == node(Pid)],
+    rabbit_ct_broker_helpers:rpc(Config, Node, sys, suspend, [CPid]),
+    CPid.
+
+%% Undo a suspend_single_* call.
+resume_sys_process(Config, Node, Pid) ->
+    rabbit_ct_broker_helpers:rpc(Config, Node, sys, resume, [Pid]).
diff --git a/rabbitmq-server/test/inet_proxy_dist.erl b/rabbitmq-server/test/inet_proxy_dist.erl
new file mode 100644 (file)
index 0000000..32b7641
--- /dev/null
@@ -0,0 +1,201 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+-module(inet_proxy_dist).
+
+%% A distribution plugin that uses the usual inet_tcp_dist but allows
+%% insertion of a proxy at the receiving end.
+
+%% inet_*_dist "behaviour"
+-export([listen/1, accept/1, accept_connection/5,
+        setup/5, close/1, select/1, is_node_name/1]).
+
+%% For copypasta from inet_tcp_dist
+-export([do_setup/6]).
+-import(error_logger,[error_msg/2]).
+
+-define(REAL, inet_tcp_dist).
+
+%%----------------------------------------------------------------------------
+
+%% Straight delegations to the real inet_tcp_dist distribution module;
+%% only setup/5 below is modified to route through the proxy.
+listen(Name)       -> ?REAL:listen(Name).
+select(Node)       -> ?REAL:select(Node).
+accept(Listen)     -> ?REAL:accept(Listen).
+close(Socket)      -> ?REAL:close(Socket).
+is_node_name(Node) -> ?REAL:is_node_name(Node).
+
+%% The accepting side needs no proxy logic: the proxy sits in front of
+%% the listener, so inbound connections arrive as plain TCP.
+accept_connection(AcceptPid, Socket, MyNode, Allowed, SetupTime) ->
+    ?REAL:accept_connection(AcceptPid, Socket, MyNode, Allowed, SetupTime).
+
+%% This is copied from inet_tcp_dist, in order to change the
+%% output of erl_epmd:port_please/2.
+
+-include_lib("kernel/include/net_address.hrl").
+-include_lib("kernel/include/dist_util.hrl").
+
+%% Kernel entry point for an outgoing distribution connection.  Same
+%% shape as inet_tcp_dist:setup/5: the handshake runs in a dedicated
+%% max-priority process linked to the caller.
+setup(Node, Type, MyNode, LongOrShortNames,SetupTime) ->
+    spawn_opt(?MODULE, do_setup, 
+             [self(), Node, Type, MyNode, LongOrShortNames, SetupTime],
+             [link, {priority, max}]).
+
+%% Copied from inet_tcp_dist:do_setup/6.  The only changes are between
+%% the "Modification START/END" markers: when the proxy is enabled, the
+%% port returned by epmd is remapped (via the kernel app env
+%% dist_and_proxy_ports_map) to the proxy's listen port, and the
+%% connection's source port is registered with inet_tcp_proxy_manager
+%% so the receiving proxy can identify the connecting node.
+do_setup(Kernel, Node, Type, MyNode, LongOrShortNames,SetupTime) ->
+    ?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]),
+    [Name, Address] = splitnode(Node, LongOrShortNames),
+    case inet:getaddr(Address, inet) of
+       {ok, Ip} ->
+           Timer = dist_util:start_timer(SetupTime),
+           case erl_epmd:port_please(Name, Ip) of
+               {port, TcpPort, Version} ->
+                   ?trace("port_please(~p) -> version ~p~n", 
+                          [Node,Version]),
+                   dist_util:reset_timer(Timer),
+                    %% Modification START
+                    Ret = application:get_env(kernel,
+                      dist_and_proxy_ports_map),
+                    PortsMap = case Ret of
+                        {ok, M}   -> M;
+                        undefined -> []
+                    end,
+                    %% With no proxy (or no mapping entry) we fall back
+                    %% to the real distribution port.
+                    ProxyPort = case inet_tcp_proxy:is_enabled() of
+                        true  -> proplists:get_value(TcpPort, PortsMap, TcpPort);
+                        false -> TcpPort
+                    end,
+                   case inet_tcp:connect(Ip, ProxyPort,
+                                         [{active, false},
+                                          {packet,2}]) of
+                       {ok, Socket} ->
+                            {ok, {_, SrcPort}} = inet:sockname(Socket),
+                            ok = inet_tcp_proxy_manager:register(
+                                   node(), Node, SrcPort, TcpPort, ProxyPort),
+                    %% Modification END
+                           HSData = #hs_data{
+                             kernel_pid = Kernel,
+                             other_node = Node,
+                             this_node = MyNode,
+                             socket = Socket,
+                             timer = Timer,
+                             this_flags = 0,
+                             other_version = Version,
+                             f_send = fun inet_tcp:send/2,
+                             f_recv = fun inet_tcp:recv/3,
+                             f_setopts_pre_nodeup = 
+                             fun(S) ->
+                                     inet:setopts
+                                       (S, 
+                                        [{active, false},
+                                         {packet, 4},
+                                         nodelay()])
+                             end,
+                             f_setopts_post_nodeup = 
+                             fun(S) ->
+                                     inet:setopts
+                                       (S, 
+                                        [{active, true},
+                                         {deliver, port},
+                                         {packet, 4},
+                                         nodelay()])
+                             end,
+                             f_getll = fun inet:getll/1,
+                             f_address = 
+                             fun(_,_) ->
+                                     #net_address{
+                                  address = {Ip,TcpPort},
+                                  host = Address,
+                                  protocol = tcp,
+                                  family = inet}
+                             end,
+                             mf_tick = fun tick/1,
+                             mf_getstat = fun inet_tcp_dist:getstat/1,
+                             request_type = Type
+                            },
+                           dist_util:handshake_we_started(HSData);
+                       R ->
+                            io:format("~p failed! ~p~n", [node(), R]),
+                           %% Other Node may have closed since 
+                           %% port_please !
+                           ?trace("other node (~p) "
+                                  "closed since port_please.~n", 
+                                  [Node]),
+                           ?shutdown(Node)
+                   end;
+               _ ->
+                   ?trace("port_please (~p) "
+                          "failed.~n", [Node]),
+                   ?shutdown(Node)
+           end;
+       _Other ->
+           ?trace("inet_getaddr(~p) "
+                  "failed (~p).~n", [Node,_Other]),
+           ?shutdown(Node)
+    end.
+
+%% If Node is illegal terminate the connection setup!!
+splitnode(Node, LongOrShortNames) ->
+    case split_node(atom_to_list(Node), $@, []) of
+       [Name|Tail] when Tail =/= [] ->
+           Host = lists:append(Tail),
+           case split_node(Host, $., []) of
+               [_] when LongOrShortNames =:= longnames ->
+                   error_msg("** System running to use "
+                             "fully qualified "
+                             "hostnames **~n"
+                             "** Hostname ~s is illegal **~n",
+                             [Host]),
+                   ?shutdown(Node);
+               L when length(L) > 1, LongOrShortNames =:= shortnames ->
+                   error_msg("** System NOT running to use fully qualified "
+                             "hostnames **~n"
+                             "** Hostname ~s is illegal **~n",
+                             [Host]),
+                   ?shutdown(Node);
+               _ ->
+                   [Name, Host]
+           end;
+       [_] ->
+           error_msg("** Nodename ~p illegal, no '@' character **~n",
+                     [Node]),
+           ?shutdown(Node);
+       _ ->
+           error_msg("** Nodename ~p illegal **~n", [Node]),
+           ?shutdown(Node)
+    end.
+
+%% Splits a string on every occurrence of the separator character,
+%% returning the list of segments; Acc holds the (reversed) characters
+%% of the segment being accumulated.
+split_node(Str, Sep, Acc) ->
+    case Str of
+        [Sep | Rest] -> [lists:reverse(Acc) | split_node(Rest, Sep, [])];
+        [C | Rest]   -> split_node(Rest, Sep, [C | Acc]);
+        []           -> [lists:reverse(Acc)]
+    end.
+
+%% we may not always want the nodelay behaviour
+%% for performance reasons
+
+%% Returns the {nodelay, Bool} socket option for distribution sockets.
+%% TCP_NODELAY is on unless the kernel app env dist_nodelay is the
+%% explicit value {ok, false}; unset or malformed values mean true.
+nodelay() ->
+    Setting = application:get_env(kernel, dist_nodelay),
+    {nodelay, Setting =/= {ok, false}}.
+
+%% Distribution tick: sends a zero-length packet to keep the connection
+%% alive.  The [force] option mirrors what inet_tcp_dist itself uses
+%% for ticks (NOTE(review): 'force' is an internal inet option --
+%% confirm against the matching OTP version).  On a closed socket we
+%% also deliver tcp_closed to ourselves so the connection owner notices
+%% the loss.
+tick(Socket) ->
+    case inet_tcp:send(Socket, [], [force]) of
+        {error, closed} ->
+            self() ! {tcp_closed, Socket},
+            {error, closed};
+        R ->
+            R
+    end.
diff --git a/rabbitmq-server/test/inet_tcp_proxy.erl b/rabbitmq-server/test/inet_tcp_proxy.erl
new file mode 100644 (file)
index 0000000..4498b8f
--- /dev/null
@@ -0,0 +1,134 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+-module(inet_tcp_proxy).
+
+%% A TCP proxy for insertion into the Erlang distribution mechanism,
+%% which allows us to simulate network partitions.
+
+-export([start/3, reconnect/1, is_enabled/0, allow/1, block/1]).
+
+-define(TABLE, ?MODULE).
+
+%% This can't start_link because there's no supervision hierarchy we
+%% can easily fit it into (we need to survive all application
+%% restarts). So we have to do some horrible error handling.
+
+%% Starts the proxy on this node.  ManagerNode is the node running
+%% inet_tcp_proxy_manager; DistPort is the real distribution port and
+%% ProxyPort the port the proxy listens on.  Returns ok once the proxy
+%% is accepting, or {error, Reason} if it died during startup.
+start(ManagerNode, DistPort, ProxyPort) ->
+    application:set_env(kernel, inet_tcp_proxy_manager_node, ManagerNode),
+    Parent = self(),
+    Pid = spawn(error_handler(fun() -> go(Parent, DistPort, ProxyPort) end)),
+    MRef = erlang:monitor(process, Pid),
+    receive
+        ready ->
+            %% Flush, so that a 'DOWN' for the (still running) proxy
+            %% delivered just before we stop monitoring cannot linger
+            %% in the caller's mailbox.
+            erlang:demonitor(MRef, [flush]),
+            ok;
+        {'DOWN', MRef, _, _, Reason} ->
+            {error, Reason}
+    end.
+
+%% Forces the given nodes (excluding ourselves) to drop their
+%% distribution connections to us; any reconnection then goes through
+%% the (possibly blocking) proxy.  Always returns ok.
+reconnect(Nodes) ->
+    lists:foreach(
+      fun (N) when N =/= node() -> erlang:disconnect_node(N);
+          (_Self)               -> ok
+      end, Nodes),
+    ok.
+
+%% The proxy is considered enabled on this node iff its named, public
+%% ETS control table (created by go/3) exists.
+is_enabled() ->
+    ets:info(?TABLE) =/= undefined.
+
+%% Re-allows distribution traffic to Node by removing its block marker
+%% from the control table.
+allow(Node) ->
+    rabbit_log:info("(~s) Allowing distribution between ~s and ~s~n",
+      [?MODULE, node(), Node]),
+    ets:delete(?TABLE, Node).
+%% Marks Node as blocked; run_loop/3 buffers traffic instead of
+%% forwarding while this marker is present.
+block(Node) ->
+    rabbit_log:info("(~s) BLOCKING distribution between ~s and ~s~n",
+      [?MODULE, node(), Node]),
+    ets:insert(?TABLE, {Node, block}).
+
+%%----------------------------------------------------------------------------
+
+%% Wraps Thunk in a fun suitable for spawn/1: an expected nodedown of
+%% the test-runner node exits quietly, while any other error is printed
+%% and halts the whole VM -- there is no supervisor to restart the
+%% proxy, so dying loudly is the only way a failure gets noticed.
+error_handler(Thunk) ->
+    fun () ->
+            try
+                Thunk()
+            catch _:{{nodedown, _}, _} ->
+                    %% The only other node we ever talk to is the test
+                    %% runner; if that's down then the test is nearly
+                    %% over; die quietly.
+                    ok;
+                  _:X ->
+                    io:format(user, "TCP proxy died with ~p~n At ~p~n",
+                              [X, erlang:get_stacktrace()]),
+                    erlang:halt(1)
+            end
+    end.
+
+%% Proxy main loop entry: creates the public control table, starts
+%% listening on the proxy port and signals Parent 'ready' before
+%% accepting connections.
+go(Parent, Port, ProxyPort) ->
+    ets:new(?TABLE, [public, named_table]),
+    {ok, Sock} = gen_tcp:listen(ProxyPort, [inet,
+                                            {reuseaddr, true}]),
+    Parent ! ready,
+    accept_loop(Sock, Port).
+
+%% Accepts inbound connections forever, handing each socket to a fresh
+%% relay process which becomes its controlling process.
+accept_loop(ListenSock, Port) ->
+    {ok, Sock} = gen_tcp:accept(ListenSock),
+    Proxy = spawn(error_handler(fun() -> run_it(Sock, Port) end)),
+    ok = gen_tcp:controlling_process(Sock, Proxy),
+    accept_loop(ListenSock, Port).
+
+%% One proxied connection: resolves the remote node from the source
+%% port it registered with inet_tcp_proxy_manager, sanity-checks that
+%% this node is the intended destination, connects onward to the local
+%% distribution port and starts relaying.  If peername/sockname fail,
+%% the socket is already dead and we simply give up.
+run_it(SockIn, Port) ->
+    case {inet:peername(SockIn), inet:sockname(SockIn)} of
+        {{ok, {_Addr, SrcPort}}, {ok, {Addr, _OtherPort}}} ->
+            {ok, Remote, This} = inet_tcp_proxy_manager:lookup(SrcPort),
+            case node() of
+                This  -> ok;
+                _     -> exit({not_me, node(), This})
+            end,
+            {ok, SockOut} = gen_tcp:connect(Addr, Port, [inet]),
+            run_loop({SockIn, SockOut}, Remote, []);
+        _ ->
+            ok
+    end.
+
+%% Relay loop: forwards traffic between the socket pair unless the
+%% remote node is currently marked blocked in the control table, in
+%% which case data is buffered (newest first, hence the reverse on
+%% flush) until unblocked.  The process-dictionary key dist_was_blocked
+%% only tracks state transitions so block/unblock is logged once, not
+%% per packet.
+run_loop(Sockets, RemoteNode, Buf0) ->
+    Block = [{RemoteNode, block}] =:= ets:lookup(?TABLE, RemoteNode),
+    receive
+        {tcp, Sock, Data} ->
+            Buf = [Data | Buf0],
+            case {Block, get(dist_was_blocked)} of
+                {true, false} ->
+                    put(dist_was_blocked, Block),
+                    rabbit_log:warning(
+                      "(~s) Distribution BLOCKED between ~s and ~s~n",
+                      [?MODULE, node(), RemoteNode]);
+                {false, S} when S =:= true orelse S =:= undefined ->
+                    put(dist_was_blocked, Block),
+                    rabbit_log:warning(
+                      "(~s) Distribution allowed between ~s and ~s~n",
+                      [?MODULE, node(), RemoteNode]);
+                _ ->
+                    ok
+            end,
+            case Block of
+                false -> gen_tcp:send(other(Sock, Sockets), lists:reverse(Buf)),
+                         run_loop(Sockets, RemoteNode, []);
+                true  -> run_loop(Sockets, RemoteNode, Buf)
+            end;
+        {tcp_closed, Sock} ->
+            gen_tcp:close(other(Sock, Sockets));
+        X ->
+            exit({weirdness, X})
+    end.
+
+%% Given one socket of a relayed pair, returns its counterpart.
+other(Sock, {InSock, OutSock}) ->
+    case Sock of
+        InSock  -> OutSock;
+        OutSock -> InSock
+    end.
diff --git a/rabbitmq-server/test/inet_tcp_proxy_manager.erl b/rabbitmq-server/test/inet_tcp_proxy_manager.erl
new file mode 100644 (file)
index 0000000..18255b8
--- /dev/null
@@ -0,0 +1,107 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+-module(inet_tcp_proxy_manager).
+
+%% The TCP proxies need to decide whether to block based on the node
+%% they're running on, and the node connecting to them. The trouble
+%% is, they don't have an easy way to determine the latter. Therefore
+%% when A connects to B we register the source port used by A here, so
+%% that B can later look it up and find out who A is without having to
+%% sniff the distribution protocol.
+%%
+%% That does unfortunately mean that we need a central control
+%% thing. We assume here it's running on the node called
+%% 'standalone_test' since that's where tests are orchestrated from.
+%%
+%% Yes, this leaks. For its intended lifecycle, that's fine.
+
+-behaviour(gen_server).
+
+-export([start/0, register/5, lookup/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+         code_change/3]).
+
+-define(NODE, ct).
+
+-record(state, {ports, pending}).
+
+%% Starts the manager registered locally under the module name; only
+%% meant to run on the controller node.
+start() ->
+    gen_server:start({local, ?MODULE}, ?MODULE, [], []).
+
+%% Called by the connecting side (inet_proxy_dist): records that the
+%% connection From -> To uses SrcPort.  When the real port equals the
+%% proxy port no proxying is taking place, so nothing is recorded.
+register(_From, _To, _SrcPort, Port, Port) ->
+    %% No proxy, don't register
+    ok;
+register(From, To, SrcPort, _Port, _ProxyPort) ->
+    gen_server:call(name(), {register, From, To, SrcPort}, infinity).
+
+%% Called by the receiving proxy: resolves a source port to
+%% {ok, FromNode, ToNode}.  Blocks (indefinitely) until the matching
+%% register arrives -- see the pending-list logic in handle_call/3.
+lookup(SrcPort) ->
+    gen_server:call(name(), {lookup, SrcPort}, infinity).
+
+%% The manager runs on the node stored in the kernel app env by
+%% inet_tcp_proxy:start/3.
+controller_node() ->
+    {ok, ManagerNode} = application:get_env(kernel,
+      inet_tcp_proxy_manager_node),
+    ManagerNode.
+
+%% gen_server address of the (possibly remote) manager.
+name() ->
+    {?MODULE, controller_node()}.
+
+%%----------------------------------------------------------------------------
+
+%% Subscribes to nodeup/nodedown so stale port mappings can be pruned
+%% when either endpoint of a recorded connection goes away.
+init([]) ->
+    net_kernel:monitor_nodes(true),
+    {ok, #state{ports   = dict:new(),
+                pending = []}}.
+
+%% A register releases any lookups that raced ahead of it: every
+%% pending caller waiting on this source port is replied to now, and
+%% the mapping is stored for future lookups.
+handle_call({register, FromNode, ToNode, SrcPort}, _From,
+            State = #state{ports   = Ports,
+                           pending = Pending}) ->
+    {Notify, Pending2} =
+        lists:partition(fun ({P, _}) -> P =:= SrcPort end, Pending),
+    [gen_server:reply(From, {ok, FromNode, ToNode}) || {_, From} <- Notify],
+    {reply, ok,
+     State#state{ports   = dict:store(SrcPort, {FromNode, ToNode}, Ports),
+                 pending = Pending2}};
+
+%% Unknown port: park the caller (noreply) until the matching register
+%% arrives.
+handle_call({lookup, SrcPort}, From,
+            State = #state{ports = Ports, pending = Pending}) ->
+    case dict:find(SrcPort, Ports) of
+        {ok, {FromNode, ToNode}} ->
+            {reply, {ok, FromNode, ToNode}, State};
+        error ->
+            {noreply, State#state{pending = [{SrcPort, From} | Pending]}}
+    end;
+
+handle_call(_Req, _From, State) ->
+    {reply, unknown_request, State}.
+
+handle_cast(_C, State) ->
+    {noreply, State}.
+
+%% Drop every mapping that involves a node which just went down; the
+%% corresponding source ports may be reused by new connections.
+handle_info({nodedown, Node}, State = #state{ports = Ports}) ->
+    Ports1 = dict:filter(
+               fun (_, {From, To}) ->
+                       Node =/= From andalso Node =/= To
+               end, Ports),
+    {noreply, State#state{ports = Ports1}};
+
+handle_info(_I, State) ->
+    {noreply, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_, State, _) -> {ok, State}.
diff --git a/rabbitmq-server/test/lazy_queue_SUITE.erl b/rabbitmq-server/test/lazy_queue_SUITE.erl
new file mode 100644 (file)
index 0000000..fe105cd
--- /dev/null
@@ -0,0 +1,224 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(lazy_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"queue.mode.test">>).
+-define(MESSAGE_COUNT, 2000).
+
+%% Common Test entry points: one sequential group holding every test.
+all() -> [{group, non_parallel_tests}].
+
+groups() ->
+    [{non_parallel_tests, [],
+      [declare_args, queue_mode_policy, publish_consume]}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Standard rabbit_ct_helpers suite bootstrap/teardown; per-testcase
+%% broker setup happens in init_per_testcase/2 below.
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+    Config.
+
+end_per_group(_, Config) ->
+    Config.
+
+%% Starts a fresh clustered 2-node broker (with an HA-all policy) for
+%% every testcase; tcp_ports_base spaces out port ranges so testcases
+%% don't collide.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = 2,
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, ClusterSize},
+        {rmq_nodes_clustered, true},
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps() ++ [
+        fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+      ]).
+
+%% Tears the per-testcase cluster back down.
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% The x-queue-mode declare argument must select the backing-queue
+%% mode: "lazy" and "default" explicitly, and default when absent.
+declare_args(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+    Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+    LQ = <<"lazy-q">>,
+    declare(Ch, LQ, [{<<"x-queue-mode">>, longstr, <<"lazy">>}]),
+    assert_queue_mode(A, LQ, lazy),
+
+    DQ = <<"default-q">>,
+    declare(Ch, DQ, [{<<"x-queue-mode">>, longstr, <<"default">>}]),
+    assert_queue_mode(A, DQ, default),
+
+    DQ2 = <<"default-q2">>,
+    declare(Ch, DQ2),
+    assert_queue_mode(A, DQ2, default),
+
+    passed.
+
+%% Policy-driven queue mode: a "lazy" policy applies to queues declared
+%% without an explicit x-queue-mode argument, while the declare
+%% argument takes precedence over the policy.  Flipping the policy back
+%% to "default" must only change the policy-driven queue (LQ2).
+queue_mode_policy(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+    set_ha_mode_policy(Config, A, <<"lazy">>),
+
+    Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+    LQ = <<"lazy-q">>,
+    declare(Ch, LQ, [{<<"x-queue-mode">>, longstr, <<"lazy">>}]),
+    assert_queue_mode(A, LQ, lazy),
+
+    LQ2 = <<"lazy-q-2">>,
+    declare(Ch, LQ2),
+    assert_queue_mode(A, LQ2, lazy),
+
+    DQ = <<"default-q">>,
+    declare(Ch, DQ, [{<<"x-queue-mode">>, longstr, <<"default">>}]),
+    assert_queue_mode(A, DQ, default),
+
+    set_ha_mode_policy(Config, A, <<"default">>),
+
+    %% Policy changes are applied asynchronously, hence the polling.
+    ok = wait_for_queue_mode(A, LQ,  lazy, 5000),
+    ok = wait_for_queue_mode(A, LQ2, default, 5000),
+    ok = wait_for_queue_mode(A, DQ,  default, 5000),
+
+    passed.
+
+%% Publishing and consuming must keep working while the queue mode is
+%% switched between default and lazy mid-stream, in several orderings
+%% (switch before publish, between publishes, and between publish and
+%% consume); no message may be lost across a mode transition.
+publish_consume(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+    Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+    declare(Ch, ?QNAME),
+
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    consume(Ch, ?QNAME, ack),
+    [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+    set_ha_mode_policy(Config, A, <<"lazy">>),
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+    set_ha_mode_policy(Config, A, <<"default">>),
+    [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    set_ha_mode_policy(Config, A, <<"lazy">>),
+    rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+    set_ha_mode_policy(Config, A, <<"default">>),
+    [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+    set_ha_mode_policy(Config, A, <<"lazy">>),
+    [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+    cancel(Ch),
+
+    passed.
+
+%%----------------------------------------------------------------------------
+
+%% Declares a durable queue Q with the given x-arguments.
+declare(Ch, Q) ->
+    declare(Ch, Q, []).
+
+declare(Ch, Q, Args) ->
+    amqp_channel:call(Ch, #'queue.declare'{queue     = Q,
+                                           durable   = true,
+                                           arguments = Args}).
+
+%% Subscribes with the fixed tag <<"ctag">>; no_ack mode only when the
+%% caller passes the atom no_ack.  Blocks until the broker confirms the
+%% subscription.
+consume(Ch, Q, Ack) ->
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue        = Q,
+                                                no_ack       = Ack =:= no_ack,
+                                                consumer_tag = <<"ctag">>},
+                           self()),
+    receive
+        #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+             ok
+    end.
+
+%% Cancels the subscription created by consume/3.
+cancel(Ch) ->
+    amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}).
+
+%% Waits for the next delivery and asserts (via the PBin = PBin2 match)
+%% that its payload is the binary rendering of the integer Payload,
+%% then optionally acks it -- see maybe_ack/3.  Blocks forever if no
+%% delivery arrives.
+assert_delivered(Ch, Ack, Payload) ->
+    PBin = payload2bin(Payload),
+    receive
+        {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} ->
+            PBin = PBin2,
+            maybe_ack(Ch, Ack, DTag)
+    end.
+
+%% Acks only for the atom do_ack; any other value (including 'ack' as
+%% the testcases in this file pass) leaves the delivery
+%% unacknowledged.  Returns the delivery tag either way.
+maybe_ack(Ch, do_ack, DTag) ->
+    amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+    DTag;
+maybe_ack(_Ch, _, DTag) ->
+    DTag.
+
+payload2bin(Int) -> list_to_binary(integer_to_list(Int)).
+
+%% Installs an all-queues HA policy that also carries the queue-mode
+%% policy key under test.
+set_ha_mode_policy(Config, Node, Mode) ->
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>, <<"all">>,
+      [{<<"queue-mode">>, Mode}]).
+
+
+%% Polls every 100 ms until Q reports Mode or the Max budget (in ms)
+%% runs out, returning 'fail' on exhaustion (callers match on 'ok', so
+%% exhaustion crashes the test with a badmatch).
+wait_for_queue_mode(_Node, _Q, _Mode, Max) when Max < 0 ->
+    fail;
+wait_for_queue_mode(Node, Q, Mode, Max) ->
+    case get_queue_mode(Node, Q) of
+        Mode  -> ok;
+        _     -> timer:sleep(100),
+                 wait_for_queue_mode(Node, Q, Mode, Max - 100)
+    end.
+
+%% Crashes (badmatch) unless Q currently reports the Expected mode.
+assert_queue_mode(Node, Q, Expected) ->
+    Actual = get_queue_mode(Node, Q),
+    Expected = Actual.
+
+%% Reads the queue's current mode out of its backing_queue_status
+%% infos on Node (default vhost assumed).
+get_queue_mode(Node, Q) ->
+    QNameRes = rabbit_misc:r(<<"/">>, queue, Q),
+    {ok, AMQQueue} =
+        rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]),
+    [{backing_queue_status, Status}] =
+        rpc:call(Node, rabbit_amqqueue, info,
+                 [AMQQueue, [backing_queue_status]]),
+    proplists:get_value(mode, Status).
diff --git a/rabbitmq-server/test/many_node_ha_SUITE.erl b/rabbitmq-server/test/many_node_ha_SUITE.erl
new file mode 100644 (file)
index 0000000..22b39e7
--- /dev/null
@@ -0,0 +1,117 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(many_node_ha_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+%% Common Test entry points: a single 6-node-cluster group.
+all() -> [{group, cluster_size_6}].
+
+groups() ->
+    [{cluster_size_6, [], [kill_intermediate]}].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Standard rabbit_ct_helpers suite bootstrap; the group only records
+%% the cluster size, which init_per_testcase/2 consumes.
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_6, Config) ->
+    rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 6}
+      ]).
+
+end_per_group(_, Config) ->
+    Config.
+
+%% Starts a fresh cluster of rmq_nodes_count nodes (set per group) with
+%% an HA-all policy; tcp_ports_base spaces out port ranges between
+%% testcases.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = ?config(rmq_nodes_count, Config),
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_clustered, true},
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps() ++ [
+        fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+      ]).
+
+%% Tears the per-testcase cluster back down.
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Declares a mirrored queue on A, consumes from E and produces on F,
+%% then kills the master and the first three slaves while messages are
+%% in flight; the consumer must nevertheless receive every message.
+%% See the caveats below about the timing sensitivity of this approach.
+kill_intermediate(Config) ->
+    [A, B, C, D, E, F] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    Msgs            = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+    MasterChannel   = rabbit_ct_client_helpers:open_channel(Config, A),
+    ConsumerChannel = rabbit_ct_client_helpers:open_channel(Config, E),
+    ProducerChannel = rabbit_ct_client_helpers:open_channel(Config, F),
+    Queue = <<"test">>,
+    amqp_channel:call(MasterChannel, #'queue.declare'{queue       = Queue,
+                                                      auto_delete = false}),
+
+    %% TODO: this seems *highly* timing dependant - the assumption being
+    %% that the kill will work quickly enough that there will still be
+    %% some messages in-flight that we *must* receive despite the intervening
+    %% node deaths. It would be nice if we could find a means to do this
+    %% in a way that is not actually timing dependent.
+
+    %% Worse still, it assumes that killing the master will cause a
+    %% failover to Slave1, and so on. Nope.
+
+    ConsumerPid = rabbit_ha_test_consumer:create(ConsumerChannel,
+                                                 Queue, self(), false, Msgs),
+
+    ProducerPid = rabbit_ha_test_producer:create(ProducerChannel,
+                                                 Queue, self(), false, Msgs),
+
+    %% create a killer for the master and the first 3 slaves
+    [rabbit_ct_broker_helpers:kill_node_after(Config, Node, Time) ||
+        {Node, Time} <- [{A, 50},
+                         {B, 50},
+                         {C, 100},
+                         {D, 100}]],
+
+    %% verify that the consumer got all msgs, or die, or time out
+    rabbit_ha_test_producer:await_response(ProducerPid),
+    rabbit_ha_test_consumer:await_response(ConsumerPid),
+    ok.
+
diff --git a/rabbitmq-server/test/mirrored_supervisor_SUITE.erl b/rabbitmq-server/test/mirrored_supervisor_SUITE.erl
new file mode 100644 (file)
index 0000000..5ed17c9
--- /dev/null
@@ -0,0 +1,335 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+%% common_test suite for the mirrored_supervisor behaviour: a group of
+%% supervisors (potentially on several nodes) that migrate each other's
+%% children on failure.
+-module(mirrored_supervisor_SUITE).
+
+-behaviour(mirrored_supervisor).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+%% Shorthand for the module under test and for the dumb gen_server
+%% (mirrored_supervisor_SUITE_gs) used as the supervised child.
+-define(MS,     mirrored_supervisor).
+-define(SERVER, mirrored_supervisor_SUITE_gs).
+
+all() ->
+    [
+      migrate,
+      migrate_twice,
+      already_there,
+      delete_restart,
+      which_children,
+      large_group,
+      childspecs_at_init,
+      anonymous_supervisors,
+      no_migration_on_shutdown,
+      start_idempotence,
+      unsupported,
+      ignore,
+      startup_failure
+    ].
+
+%% Starts mnesia in the suite's priv_dir and creates the tables that
+%% mirrored_supervisor needs (the `match' property is stripped from each
+%% table definition as mnesia:create_table/2 does not accept it).
+init_per_suite(Config) ->
+    ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)),
+    ok = application:start(mnesia),
+    lists:foreach(
+      fun ({Tab, TabDef}) ->
+              TabDef1 = proplists:delete(match, TabDef),
+              case mnesia:create_table(Tab, TabDef1) of
+                  {atomic, ok} ->
+                      ok;
+                  {aborted, Reason} ->
+                      throw({error,
+                          {table_creation_failed, Tab, TabDef1, Reason}})
+              end
+      end, mirrored_supervisor:table_definitions()),
+    Config.
+
+%% Mirror of init_per_suite/1: stop mnesia again.
+end_per_suite(Config) ->
+    ok = application:stop(mnesia),
+    Config.
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+%% Simplest test: kill the supervisor hosting the child and check the
+%% child reappears (under a different pid) on the surviving supervisor.
+migrate(_Config) ->
+    passed = with_sups(
+      fun([A, _]) ->
+              {ok, _} = ?MS:start_child(a, childspec(worker)),
+              Pid1 = pid_of(worker),
+              kill_registered(A, Pid1),
+              Pid2 = pid_of(worker),
+              false = (Pid1 =:= Pid2)
+      end, [a, b]).
+
+%% Is migration transitive? Kill two supervisors in succession (adding a
+%% third, c, in between) and check the child is still migrated.
+migrate_twice(_Config) ->
+    passed = with_sups(
+      fun([A, B]) ->
+              {ok, _} = ?MS:start_child(a, childspec(worker)),
+              Pid1 = pid_of(worker),
+              kill_registered(A, Pid1),
+              {ok, C} = start_sup(c),
+              Pid2 = pid_of(worker),
+              kill_registered(B, Pid2),
+              Pid3 = pid_of(worker),
+              false = (Pid1 =:= Pid3),
+              kill(C)
+      end, [a, b]).
+
+%% Can't start the same child twice - even via a different member of the
+%% supervisor group.
+already_there(_Config) ->
+    passed = with_sups(
+      fun([_, _]) ->
+              S = childspec(worker),
+              {ok, Pid}                       = ?MS:start_child(a, S),
+              {error, {already_started, Pid}} = ?MS:start_child(b, S)
+      end, [a, b]).
+
+%% Deleting and restarting should work as per a normal supervisor
+delete_restart(_Config) ->
+    passed = with_sups(
+      fun([_, _]) ->
+              S = childspec(worker),
+              {ok, Pid1} = ?MS:start_child(a, S),
+              %% Running children cannot be deleted without terminating first.
+              {error, running} = ?MS:delete_child(a, worker),
+              ok = ?MS:terminate_child(a, worker),
+              ok = ?MS:delete_child(a, worker),
+              {ok, Pid2} = ?MS:start_child(b, S),
+              false = (Pid1 =:= Pid2),
+              ok = ?MS:terminate_child(b, worker),
+              {ok, Pid3} = ?MS:restart_child(b, worker),
+              Pid3 = pid_of(worker),
+              false = (Pid2 =:= Pid3),
+              %% Not the same supervisor as the worker is on
+              ok = ?MS:terminate_child(a, worker),
+              ok = ?MS:delete_child(a, worker),
+              {ok, Pid4} = ?MS:start_child(a, S),
+              false = (Pid3 =:= Pid4)
+      end, [a, b]).
+
+%% which_children/1 must give a consistent view from every member of the
+%% group, through terminate/restart cycles and additional children.
+which_children(_Config) ->
+    passed = with_sups(
+      fun([A, B] = Both) ->
+              ?MS:start_child(A, childspec(worker)),
+              assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+              ok = ?MS:terminate_child(a, worker),
+              assert_wc(Both, fun ([C]) -> undefined = wc_pid(C) end),
+              {ok, _} = ?MS:restart_child(a, worker),
+              assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+              ?MS:start_child(B, childspec(worker2)),
+              assert_wc(Both, fun (C) -> 2 = length(C) end)
+      end, [a, b]).
+
+%% Apply assertion Fun to which_children/1 of every supervisor in Sups.
+assert_wc(Sups, Fun) ->
+    [Fun(?MS:which_children(Sup)) || Sup <- Sups].
+
+%% Extract the child pid from a which_children/1 entry for `worker'.
+wc_pid(Child) ->
+    {worker, Pid, worker, [?MODULE]} = Child,
+    Pid.
+
+%% Not all the members of the group should actually do the failover
+large_group(_Config) ->
+    passed = with_sups(
+      fun([A, _, _, _]) ->
+              {ok, _} = ?MS:start_child(a, childspec(worker)),
+              Pid1 = pid_of(worker),
+              kill_registered(A, Pid1),
+              Pid2 = pid_of(worker),
+              false = (Pid1 =:= Pid2)
+      end, [a, b, c, d]).
+
+%% Do childspecs work when returned from init?
+%% (Children are passed as part of the start_sup spec rather than added
+%% via start_child, then migration is checked as in migrate/1.)
+childspecs_at_init(_Config) ->
+    S = childspec(worker),
+    passed = with_sups(
+      fun([A, _]) ->
+              Pid1 = pid_of(worker),
+              kill_registered(A, Pid1),
+              Pid2 = pid_of(worker),
+              false = (Pid1 =:= Pid2)
+      end, [{a, [S]}, {b, [S]}]).
+
+%% Same as migrate/1, but with unregistered (anonymous) supervisors.
+anonymous_supervisors(_Config) ->
+    passed = with_sups(
+      fun([A, _B]) ->
+              {ok, _} = ?MS:start_child(A, childspec(worker)),
+              Pid1 = pid_of(worker),
+              kill_registered(A, Pid1),
+              Pid2 = pid_of(worker),
+              false = (Pid1 =:= Pid2)
+      end, [anon, anon]).
+
+%% When a mirrored_supervisor terminates, we should not migrate, but
+%% the whole supervisor group should shut down. To test this we set up
+%% a situation where the gen_server will only fail if it's running
+%% under the supervisor called 'evil'. It should not migrate to
+%% 'good' and survive, rather the whole group should go away.
+no_migration_on_shutdown(_Config) ->
+    passed = with_sups(
+      fun([Evil, _]) ->
+              {ok, _} = ?MS:start_child(Evil, childspec(worker)),
+              try
+                  call(worker, ping, 1000, 100),
+                  exit(worker_should_not_have_migrated)
+                  catch exit:{timeout_waiting_for_server, _, _} ->
+                      ok
+              end
+      end, [evil, good]).
+
+%% Repeated start_child calls for the same spec must be reported rather
+%% than duplicated, both while running and after termination.
+start_idempotence(_Config) ->
+    passed = with_sups(
+      fun([_]) ->
+              CS = childspec(worker),
+              {ok, Pid}                       = ?MS:start_child(a, CS),
+              {error, {already_started, Pid}} = ?MS:start_child(a, CS),
+              ?MS:terminate_child(a, worker),
+              {error, already_present}        = ?MS:start_child(a, CS)
+      end, [a]).
+
+%% Unsupported configurations must be rejected with badarg: global
+%% registration and the simple_one_for_one strategy.
+unsupported(_Config) ->
+    try
+        ?MS:start_link({global, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+                       {one_for_one, []}),
+        exit(no_global)
+    catch error:badarg ->
+        ok
+    end,
+    try
+        {ok, _} = ?MS:start_link({local, foo}, get_group(group),
+          fun tx_fun/1, ?MODULE, {simple_one_for_one, []}),
+        exit(no_sofo)
+    catch error:badarg ->
+        ok
+    end.
+
+%% Just test we don't blow up
+%% (init/1 below returns `ignore' for this fake strategy.)
+ignore(_Config) ->
+    ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+                   {fake_strategy_for_ignore, []}).
+
+%% A child whose start function errors or exits must shut the
+%% supervisor down rather than hang it.
+startup_failure(_Config) ->
+    [test_startup_failure(F) || F <- [want_error, want_exit]].
+
+%% Helper for startup_failure/1: Fail selects the failing start_link
+%% clause in ?SERVER; we expect a shutdown EXIT within 1s.
+test_startup_failure(Fail) ->
+    process_flag(trap_exit, true),
+    ?MS:start_link(get_group(group), fun tx_fun/1, ?MODULE,
+                   {one_for_one, [childspec(Fail)]}),
+    receive
+        {'EXIT', _, shutdown} ->
+            ok
+    after 1000 ->
+            exit({did_not_exit, Fail})
+    end,
+    process_flag(trap_exit, false).
+
+%% ---------------------------------------------------------------------------
+
+%% Test harness: start a fresh group of supervisors per Sups, run Fun
+%% on their pids, then kill any survivors. The sleep gives the group a
+%% moment to settle before the next case reuses registered names.
+with_sups(Fun, Sups) ->
+    inc_group(),
+    Pids = [begin {ok, Pid} = start_sup(Sup), Pid end || Sup <- Sups],
+    Fun(Pids),
+    [kill(Pid) || Pid <- Pids, is_process_alive(Pid)],
+    timer:sleep(500),
+    passed.
+
+start_sup(Spec) ->
+    start_sup(Spec, group).
+
+%% Spec is either a bare name or {Name, ChildSpecs}; `anon' means an
+%% unregistered supervisor (see start_sup0/3).
+start_sup({Name, ChildSpecs}, Group) ->
+    {ok, Pid} = start_sup0(Name, get_group(Group), ChildSpecs),
+    %% We are not a supervisor, when we kill the supervisor we do not
+    %% want to die!
+    unlink(Pid),
+    {ok, Pid};
+
+start_sup(Name, Group) ->
+    start_sup({Name, []}, Group).
+
+start_sup0(anon, Group, ChildSpecs) ->
+    ?MS:start_link(Group, fun tx_fun/1, ?MODULE,
+                   {one_for_one, ChildSpecs});
+
+start_sup0(Name, Group, ChildSpecs) ->
+    ?MS:start_link({local, Name}, Group, fun tx_fun/1, ?MODULE,
+                   {one_for_one, ChildSpecs}).
+
+%% Transient worker spec pointing at ?SERVER; the 16#ffffffff shutdown
+%% value is effectively an unbounded shutdown timeout.
+childspec(Id) ->
+    {Id,{?SERVER, start_link, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}.
+
+%% Resolve the pid currently registered under Id by pinging the
+%% gen_server (?SERVER's handle_call replies {received, self(), Msg}).
+pid_of(Id) ->
+    {received, Pid, ping} = call(Id, ping),
+    Pid.
+
+%% Run Fun inside a synchronous mnesia transaction, throwing on abort.
+%% Passed to mirrored_supervisor:start_link/4,5 as the tx function.
+tx_fun(Fun) ->
+    case mnesia:sync_transaction(Fun) of
+        {atomic,  Result}         -> Result;
+        {aborted, Reason}         -> throw({error, Reason})
+    end.
+
+%% Bump the per-process counter used to give each with_sups/2 run a
+%% fresh group name (see get_group/1).
+inc_group() ->
+    Count = case get(counter) of
+                undefined -> 0;
+                C         -> C
+            end + 1,
+    put(counter, Count).
+
+get_group(Group) ->
+    {Group, get(counter)}.
+
+%% Call the server registered as Id, retrying every Decr ms for up to
+%% MaxDelay ms while it is not (yet) registered or is restarting.
+call(Id, Msg) -> call(Id, Msg, 10*1000, 100).
+
+%% Use a `=< 0' guard instead of matching 0 exactly: a MaxDelay that is
+%% not a whole multiple of Decr would otherwise step past zero and
+%% recurse forever.
+%% NOTE(review): erlang:get_stacktrace/0 is deprecated from OTP 21 on;
+%% kept as-is for the OTP release this code targets.
+call(Id, Msg, MaxDelay, _Decr) when MaxDelay =< 0 ->
+    exit({timeout_waiting_for_server, {Id, Msg}, erlang:get_stacktrace()});
+
+call(Id, Msg, MaxDelay, Decr) ->
+    try
+        gen_server:call(Id, Msg, infinity)
+    catch exit:_ -> timer:sleep(Decr),
+                    call(Id, Msg, MaxDelay - Decr, Decr)
+    end.
+
+%% Kill Pid with reason `bang' and block until it - and every process
+%% in Waits - is reported DOWN, so the next step sees a settled state.
+kill(Pid) -> kill(Pid, []).
+kill(Pid, Wait) when is_pid(Wait) -> kill(Pid, [Wait]);
+kill(Pid, Waits) ->
+    erlang:monitor(process, Pid),
+    [erlang:monitor(process, P) || P <- Waits],
+    exit(Pid, bang),
+    kill_wait(Pid),
+    [kill_wait(P) || P <- Waits].
+
+%% Kill supervisor Pid, wait for both it and Child to die, then check
+%% Child's registered name no longer resolves to the old pid.
+kill_registered(Pid, Child) ->
+    {registered_name, Name} = erlang:process_info(Child, registered_name),
+    kill(Pid, Child),
+    false = (Child =:= whereis(Name)),
+    ok.
+
+%% Block until the DOWN message for Pid from kill/2's monitor arrives.
+kill_wait(Pid) ->
+    receive
+        {'DOWN', _Ref, process, Pid, _Reason} ->
+            ok
+    end.
+
+%% ---------------------------------------------------------------------------
+
+%% mirrored_supervisor callback. The fake strategy returns `ignore'
+%% (exercised by the ignore/1 testcase); anything else becomes a
+%% standard one-restart-per-second supervisor spec.
+init({fake_strategy_for_ignore, _ChildSpecs}) ->
+    ignore;
+
+init({Strategy, ChildSpecs}) ->
+    {ok, {{Strategy, 0, 1}, ChildSpecs}}.
+
diff --git a/rabbitmq-server/test/mirrored_supervisor_SUITE_gs.erl b/rabbitmq-server/test/mirrored_supervisor_SUITE_gs.erl
new file mode 100644 (file)
index 0000000..867754b
--- /dev/null
@@ -0,0 +1,66 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(mirrored_supervisor_SUITE_gs).
+
+%% Dumb gen_server we can supervise
+
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+         handle_cast/2]).
+
+-behaviour(gen_server).
+
+-define(MS,  mirrored_supervisor).
+
+%% The want_error / want_exit clauses deliberately fail so the suite's
+%% startup_failure testcase can exercise both failure modes.
+start_link(want_error) ->
+    {error, foo};
+
+start_link(want_exit) ->
+    exit(foo);
+
+start_link(Id) ->
+    gen_server:start_link({local, Id}, ?MODULE, [], []).
+
+%% ---------------------------------------------------------------------------
+
+init([]) ->
+    {ok, state}.
+
+%% Echo the message back with our pid; pid_of/1 in the suite relies on
+%% this {received, Pid, Msg} reply shape.
+handle_call(Msg, _From, State) ->
+    die_if_my_supervisor_is_evil(),
+    {reply, {received, self(), Msg}, State}.
+
+handle_cast(_Msg, State) ->
+    {noreply, State}.
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%% Exit iff we appear among the children of the supervisor registered
+%% as `evil' (see no_migration_on_shutdown in the suite); a noproc exit
+%% means no such supervisor exists, which is fine.
+die_if_my_supervisor_is_evil() ->
+    try lists:keysearch(self(), 2, ?MS:which_children(evil)) of
+        false -> ok;
+        _     -> exit(doooom)
+    catch
+        exit:{noproc, _} -> ok
+    end.
diff --git a/rabbitmq-server/test/msg_store_SUITE.erl b/rabbitmq-server/test/msg_store_SUITE.erl
new file mode 100644 (file)
index 0000000..f63f6cb
--- /dev/null
@@ -0,0 +1,62 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+%% Suite for msg_store configuration validation in the `rabbit' module.
+-module(msg_store_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+%% Apply Fun in `rabbit', catching so that a validation error comes
+%% back as a term rather than crashing the testcase.
+-define(T(Fun, Args), (catch apply(rabbit, Fun, Args))).
+
+all() ->
+    [
+      parameter_validation
+    ].
+
+%% Exercises rabbit:validate_msg_store_io_batch_size_and_credit_disc_bound/2
+%% with the defaults, valid overrides, and each class of invalid input.
+parameter_validation(_Config) ->
+    %% make sure it works with default values
+    ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+            [?CREDIT_DISC_BOUND, ?IO_BATCH_SIZE]),
+
+    %% IO_BATCH_SIZE must be greater than CREDIT_DISC_BOUND initial credit
+    ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+            [{2000, 500}, 3000]),
+    {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+                    [{2000, 500}, 1500]),
+
+    %% All values must be integers
+    {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+                    [{2000, 500}, "1500"]),
+    {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+                    [{"2000", 500}, abc]),
+    {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+                    [{2000, "500"}, 2048]),
+
+    %% CREDIT_DISC_BOUND must be a tuple
+    {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+                    [[2000, 500], 1500]),
+    {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+                    [2000, 1500]),
+
+    %% config values can't be smaller than default values
+    {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+                    [{1999, 500}, 2048]),
+    {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+                    [{2000, 499}, 2048]),
+    {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+                    [{2000, 500}, 2047]).
diff --git a/rabbitmq-server/test/partitions_SUITE.erl b/rabbitmq-server/test/partitions_SUITE.erl
new file mode 100644 (file)
index 0000000..1b901b5
--- /dev/null
@@ -0,0 +1,413 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+%% Suite for rabbit's cluster-partition handling modes (ignore,
+%% pause_minority, pause_if_all_down, autoheal) using an inter-node
+%% traffic proxy to simulate partitions.
+-module(partitions_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-import(rabbit_ct_broker_helpers, [enable_dist_proxy_manager/1,
+                                   enable_dist_proxy/1,
+                                   enable_dist_proxy_on_node/3]).
+
+%% We set ticktime to 1s and setuptime is 7s so to make sure it
+%% passes...
+-define(DELAY, 8000).
+
+all() ->
+    [
+      {group, net_ticktime_1},
+      {group, net_ticktime_10}
+    ].
+
+%% Testcases are grouped by net_ticktime and then by cluster size; see
+%% init_per_group/2 for the corresponding config.
+groups() ->
+    [
+      {net_ticktime_1, [], [
+          {cluster_size_2, [], [
+              ctl_ticktime_sync,
+              prompt_disconnect_detection
+            ]},
+          {cluster_size_3, [], [
+              autoheal,
+              autoheal_after_pause_if_all_down,
+              ignore,
+              pause_if_all_down_on_blocked,
+              pause_if_all_down_on_down,
+              pause_minority_on_blocked,
+              pause_minority_on_down,
+              partial_false_positive,
+              partial_to_full,
+              partial_pause_minority,
+              partial_pause_if_all_down
+            ]}
+        ]},
+      {net_ticktime_10, [], [
+          {cluster_size_2, [], [
+              pause_if_all_down_false_promises_mirrored,
+              pause_if_all_down_false_promises_unmirrored,
+              pause_minority_false_promises_mirrored,
+              pause_minority_false_promises_unmirrored
+            ]}
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Enable the dist proxy manager at suite level so testcases can later
+%% block/allow traffic between nodes.
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config, [
+        fun rabbit_ct_broker_helpers:enable_dist_proxy_manager/1
+      ]).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% Each group name encodes its config: net_ticktime value or node count.
+init_per_group(net_ticktime_1, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{net_ticktime, 1}]);
+init_per_group(net_ticktime_10, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{net_ticktime, 10}]);
+init_per_group(cluster_size_2, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]).
+
+end_per_group(_, Config) ->
+    Config.
+
+%% Nodes start unclustered; the dist proxy is enabled and then the
+%% nodes are clustered as extra setup steps. tcp_ports_base is offset
+%% per testcase so parallel/ordered cases never collide on ports.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = ?config(rmq_nodes_count, Config),
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_clustered, false},
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps() ++ [
+        fun rabbit_ct_broker_helpers:enable_dist_proxy/1,
+        fun rabbit_ct_broker_helpers:cluster_nodes/1
+      ]).
+
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% With the default `ignore' mode, a partition isolates A from B and C:
+%% all nodes stay up and each reports the others as partitioned.
+ignore(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    block_unblock([{A, B}, {A, C}]),
+    timer:sleep(?DELAY),
+    [B, C] = partitions(A),
+    [A] = partitions(B),
+    [A] = partitions(C),
+    ok.
+
+%% pause_minority: A must survive losing one of three nodes, but pause
+%% once it is in the minority (both B and C gone).
+pause_minority_on_down(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    set_mode(Config, pause_minority),
+
+    true = is_running(A),
+
+    rabbit_ct_broker_helpers:kill_node(Config, B),
+    timer:sleep(?DELAY),
+    true = is_running(A),
+
+    rabbit_ct_broker_helpers:kill_node(Config, C),
+    await_running(A, false),
+    ok.
+
+pause_minority_on_blocked(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    set_mode(Config, pause_minority),
+    pause_on_blocked(A, B, C).
+
+%% pause_if_all_down with C as the "must be up" node: A only pauses
+%% once C itself is gone.
+pause_if_all_down_on_down(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    set_mode(Config, {pause_if_all_down, [C], ignore}),
+    [(true = is_running(N)) || N <- [A, B, C]],
+
+    rabbit_ct_broker_helpers:kill_node(Config, B),
+    timer:sleep(?DELAY),
+    [(true = is_running(N)) || N <- [A, C]],
+
+    rabbit_ct_broker_helpers:kill_node(Config, C),
+    timer:sleep(?DELAY),
+    await_running(A, false),
+    ok.
+
+pause_if_all_down_on_blocked(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    set_mode(Config, {pause_if_all_down, [C], ignore}),
+    pause_on_blocked(A, B, C).
+
+%% Shared body: isolate A by blocking its traffic to B and C, check A
+%% pauses while B and C keep running, then unblock and check A resumes
+%% with no partitions left recorded.
+pause_on_blocked(A, B, C) ->
+    [(true = is_running(N)) || N <- [A, B, C]],
+    block([{A, B}, {A, C}]),
+    await_running(A, false),
+    [await_running(N, true) || N <- [B, C]],
+    unblock([{A, B}, {A, C}]),
+    [await_running(N, true) || N <- [A, B, C]],
+    Status = rpc:call(B, rabbit_mnesia, status, []),
+    [] = rabbit_misc:pget(partitions, Status),
+    ok.
+
+%%% Make sure we do not confirm any messages after a partition has
+%%% happened but before we pause, since any such confirmations would be
+%%% lies.
+%%%
+%%% This test has to use an AB cluster (not ABC) since GM ends up
+%%% taking longer to detect down slaves when there are more nodes and
+%%% we close the window by mistake.
+%%%
+%%% In general there are quite a few ways to accidentally cause this
+%%% test to pass since there are a lot of things in the broker that can
+%%% suddenly take several seconds to time out when TCP connections
+%%% won't establish.
+
+pause_minority_false_promises_mirrored(Config) ->
+    rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<".*">>, <<"all">>),
+    pause_false_promises(Config, pause_minority).
+
+pause_minority_false_promises_unmirrored(Config) ->
+    pause_false_promises(Config, pause_minority).
+
+pause_if_all_down_false_promises_mirrored(Config) ->
+    rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<".*">>, <<"all">>),
+    B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+    pause_false_promises(Config, {pause_if_all_down, [B], ignore}).
+
+pause_if_all_down_false_promises_unmirrored(Config) ->
+    B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+    pause_false_promises(Config, {pause_if_all_down, [B], ignore}).
+
+%% Shared body for the four cases above: publish with confirms on A
+%% while a partition is induced, then verify that every confirmed
+%% message actually survived on B (Confirmed =< Survived - i.e. no
+%% "false promises").
+pause_false_promises(Config, ClusterPartitionHandling) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    set_mode(Config, [A], ClusterPartitionHandling),
+    ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+    ChB = rabbit_ct_client_helpers:open_channel(Config, B),
+    amqp_channel:call(ChB, #'queue.declare'{queue   = <<"test">>,
+                                            durable = true}),
+    amqp_channel:call(ChA, #'confirm.select'{}),
+    amqp_channel:register_confirm_handler(ChA, self()),
+
+    %% Cause a partition after 1s
+    Self = self(),
+    spawn_link(fun () ->
+                       timer:sleep(1000),
+                       %%io:format(user, "~p BLOCK~n", [calendar:local_time()]),
+                       block([{A, B}]),
+                       unlink(Self)
+               end),
+
+    %% Publish large no of messages, see how many we get confirmed
+    [amqp_channel:cast(ChA, #'basic.publish'{routing_key = <<"test">>},
+                       #amqp_msg{props = #'P_basic'{delivery_mode = 1}}) ||
+        _ <- lists:seq(1, 100000)],
+    %%io:format(user, "~p finish publish~n", [calendar:local_time()]),
+
+    %% Time for the partition to be detected. We don't put this sleep
+    %% in receive_acks since otherwise we'd have another similar sleep
+    %% at the end.
+    timer:sleep(30000),
+    Confirmed = receive_acks(0),
+    %%io:format(user, "~p got acks~n", [calendar:local_time()]),
+    await_running(A, false),
+    %%io:format(user, "~p A stopped~n", [calendar:local_time()]),
+
+    unblock([{A, B}]),
+    await_running(A, true),
+
+    %% But how many made it onto the rest of the cluster?
+    #'queue.declare_ok'{message_count = Survived} =
+        amqp_channel:call(ChB, #'queue.declare'{queue   = <<"test">>,
+                                                durable = true}),
+    %%io:format(user, "~p queue declared~n", [calendar:local_time()]),
+    case Confirmed > Survived of
+        true  -> io:format("Confirmed=~p Survived=~p~n", [Confirmed, Survived]);
+        false -> ok
+    end,
+    true = (Confirmed =< Survived),
+
+    rabbit_ct_client_helpers:close_channel(ChB),
+    rabbit_ct_client_helpers:close_channel(ChA),
+    ok.
+
+%% Drain basic.ack messages; delivery tags are cumulative, so the last
+%% tag seen is the count of confirmed messages.
+receive_acks(Max) ->
+    receive
+        #'basic.ack'{delivery_tag = DTag} ->
+            receive_acks(DTag)
+    after ?DELAY ->
+            Max
+    end.
+
+%% After blocking A from B, an info_all RPC against A must return
+%% promptly (bounded by ?DELAY) instead of waiting for a connection
+%% setup timeout per queue.
+prompt_disconnect_detection(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    ChB = rabbit_ct_client_helpers:open_channel(Config, B),
+    [amqp_channel:call(ChB, #'queue.declare'{}) || _ <- lists:seq(1, 100)],
+    block([{A, B}]),
+    timer:sleep(?DELAY),
+    %% We want to make sure we do not end up waiting for setuptime *
+    %% no of queues. Unfortunately that means we need a timeout...
+    [] = rabbit_ct_broker_helpers:rpc(Config, A,
+      rabbit_amqqueue, info_all, [<<"/">>], ?DELAY),
+    rabbit_ct_client_helpers:close_channel(ChB),
+    ok.
+
+ctl_ticktime_sync(Config) ->
+    %% Server has 1s net_ticktime, make sure ctl doesn't get disconnected
+    Cmd = ["eval", "timer:sleep(5000)."],
+    {ok, "ok\n"} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd).
+
+%% NB: we test full and partial partitions here.
+autoheal(Config) ->
+    set_mode(Config, autoheal),
+    do_autoheal(Config).
+
+autoheal_after_pause_if_all_down(Config) ->
+    [_, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    set_mode(Config, {pause_if_all_down, [B, C], autoheal}),
+    do_autoheal(Config).
+
+%% Shared body: for increasingly severe partitions, check that after
+%% unblocking all nodes are listening again with no recorded partitions.
+do_autoheal(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    Test = fun (Pairs) ->
+                   block_unblock(Pairs),
+                   %% Sleep to make sure all the partitions are noticed
+                   %% ?DELAY for the net_tick timeout
+                   timer:sleep(?DELAY),
+                   [await_listening(N, true) || N <- [A, B, C]],
+                   [await_partitions(N, []) || N <- [A, B, C]]
+           end,
+    Test([{B, C}]),
+    Test([{A, C}, {B, C}]),
+    Test([{A, B}, {A, C}, {B, C}]),
+    ok.
+
+%% A transient asymmetric block must not be misread as a partial
+%% partition between the unaffected nodes.
+partial_false_positive(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    block([{A, B}]),
+    timer:sleep(1000),
+    block([{A, C}]),
+    timer:sleep(?DELAY),
+    unblock([{A, B}, {A, C}]),
+    timer:sleep(?DELAY),
+    %% When B times out A's connection, it will check with C. C will
+    %% not have timed out A yet, but already it can't talk to it. We
+    %% need to not consider this a partial partition; B and C should
+    %% still talk to each other.
+    [B, C] = partitions(A),
+    [A] = partitions(B),
+    [A] = partitions(C),
+    ok.
+
+%% A partial partition (A-B only) should be promoted to one of the
+%% acceptable full-partition outcomes enumerated below.
+partial_to_full(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    block_unblock([{A, B}]),
+    timer:sleep(?DELAY),
+    %% There are several valid ways this could go, depending on how
+    %% the DOWN messages race: either A gets disconnected first and BC
+    %% stay together, or B gets disconnected first and AC stay
+    %% together, or both make it through and all three get
+    %% disconnected.
+    case {partitions(A), partitions(B), partitions(C)} of
+        {[B, C], [A],    [A]}    -> ok;
+        {[B],    [A, C], [B]}    -> ok;
+        {[B, C], [A, C], [A, B]} -> ok;
+        Partitions               -> exit({partitions, Partitions})
+    end.
+
+%% Under pause_minority a partial A-B partition pauses both A and B
+%% (each sees itself in a minority), leaving only C running.
+partial_pause_minority(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    set_mode(Config, pause_minority),
+    block([{A, B}]),
+    [await_running(N, false) || N <- [A, B]],
+    await_running(C, true),
+    unblock([{A, B}]),
+    [await_listening(N, true) || N <- [A, B, C]],
+    [await_partitions(N, []) || N <- [A, B, C]],
+    ok.
+
+%% Under pause_if_all_down([B]) only A pauses when cut off from B; B
+%% and C keep running.
+partial_pause_if_all_down(Config) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    set_mode(Config, {pause_if_all_down, [B], ignore}),
+    block([{A, B}]),
+    await_running(A, false),
+    [await_running(N, true) || N <- [B, C]],
+    unblock([{A, B}]),
+    [await_listening(N, true) || N <- [A, B, C]],
+    [await_partitions(N, []) || N <- [A, B, C]],
+    ok.
+
+%% Set the cluster_partition_handling mode on all nodes (arity 2) or
+%% on the given Nodes only (arity 3).
+set_mode(Config, Mode) ->
+    rabbit_ct_broker_helpers:set_partition_handling_mode_globally(Config, Mode).
+
+set_mode(Config, Nodes, Mode) ->
+    rabbit_ct_broker_helpers:set_partition_handling_mode(Config, Nodes, Mode).
+
+%% Block each node pair long enough (?DELAY) for the partition to be
+%% noticed, then unblock.
+block_unblock(Pairs) ->
+    block(Pairs),
+    timer:sleep(?DELAY),
+    unblock(Pairs).
+
+block(Pairs)   -> [block(X, Y) || {X, Y} <- Pairs].
+unblock(Pairs) -> [allow(X, Y) || {X, Y} <- Pairs].
+
+%% Ask Node for its recorded partitions, retrying while the node is
+%% down/restarting (any non-abnormal EXIT from the rpc).
+partitions(Node) ->
+    case rpc:call(Node, rabbit_node_monitor, partitions, []) of
+        {badrpc, {'EXIT', E}} = R -> case rabbit_misc:is_abnormal_exit(E) of
+                                         true  -> R;
+                                         false -> timer:sleep(1000),
+                                                  partitions(Node)
+                                     end;
+        Partitions                -> Partitions
+    end.
+
+block(X, Y) ->
+    rabbit_ct_broker_helpers:block_traffic_between(X, Y).
+
+allow(X, Y) ->
+    rabbit_ct_broker_helpers:allow_traffic_between(X, Y).
+
+%% Poll every 100ms (no overall timeout; the CT timetrap bounds it)
+%% until the probe function returns the expected value.
+await_running   (Node, Bool)  -> await(Node, Bool,  fun is_running/1).
+await_listening (Node, Bool)  -> await(Node, Bool,  fun is_listening/1).
+await_partitions(Node, Parts) -> await(Node, Parts, fun partitions/1).
+
+await(Node, Res, Fun) ->
+    case Fun(Node) of
+        Res -> ok;
+        _   -> timer:sleep(100),
+               await(Node, Res, Fun)
+    end.
+
+is_running(Node) -> rpc:call(Node, rabbit, is_running, []).
+
+%% A node "listens" iff it reports at least one listener; a badrpc
+%% (non-list) result counts as not listening.
+is_listening(Node) ->
+    case rpc:call(Node, rabbit_networking, node_listeners, [Node]) of
+        []    -> false;
+        [_|_] -> true;
+        _     -> false
+    end.
diff --git a/rabbitmq-server/test/priority_queue_SUITE.erl b/rabbitmq-server/test/priority_queue_SUITE.erl
new file mode 100644 (file)
index 0000000..05853eb
--- /dev/null
@@ -0,0 +1,671 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(priority_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+%% Top-level CT entry point: run both cluster-size groups.
+all() ->
+    [
+      {group, cluster_size_2},
+      {group, cluster_size_3}
+    ].
+
+%% Test groups keyed by the cluster size they need; the mirror-policy
+%% reset tests require three nodes, everything else runs on two.
+groups() ->
+    [
+     {cluster_size_2, [], [
+                           ackfold,
+                           drop,
+                           dropwhile_fetchwhile,
+                           info_head_message_timestamp,
+                           matching,
+                           mirror_queue_sync,
+                           mirror_queue_sync_priority_above_max,
+                           mirror_queue_sync_priority_above_max_pending_ack,
+                           mirror_queue_sync_order,
+                           purge,
+                           requeue,
+                           resume,
+                           simple_order,
+                           straight_through,
+                           invoke
+                          ]},
+     {cluster_size_3, [], [
+                           mirror_queue_auto_ack,
+                           mirror_fast_reset_policy,
+                           mirror_reset_policy
+                          ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% Start a fresh cluster per group, sized to the group's needs; node
+%% names are suffixed with the group name so runs don't collide.
+init_per_group(cluster_size_2, Config) ->
+    Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+                                                    {rmq_nodes_count, 2},
+                                                    {rmq_nodename_suffix, Suffix}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps());
+init_per_group(cluster_size_3, Config) ->
+    Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+                                                    {rmq_nodes_count, 3},
+                                                    {rmq_nodename_suffix, Suffix}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_Group, Config) ->
+    rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+    %% NOTE(review): setup_steps/0 only *returns* a list of steps; calling
+    %% it here without rabbit_ct_helpers:run_steps/2 discards the result and
+    %% does nothing -- confirm whether run_steps(Config, ...) was intended.
+    rabbit_ct_client_helpers:setup_steps(),
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    %% NOTE(review): same as above -- teardown_steps/0 result is discarded.
+    rabbit_ct_client_helpers:teardown_steps(),
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% The BQ API is used in all sorts of places in all sorts of
+%% ways. Therefore we have to jump through a few different hoops
+%% in order to integration-test it.
+%%
+%% * start/1, stop/0, init/3, terminate/2, delete_and_terminate/2
+%%   - starting and stopping rabbit. durable queues / persistent msgs needed
+%%     to test recovery
+%%
+%% * publish/5, drain_confirmed/1, fetch/2, ack/2, is_duplicate/2, msg_rates/1,
+%%   needs_timeout/1, timeout/1, invoke/3, resume/1 [0]
+%%   - regular publishing and consuming, with confirms and acks and durability
+%%
+%% * publish_delivered/4    - publish with acks straight through
+%% * discard/3              - publish without acks straight through
+%% * dropwhile/2            - expire messages without DLX
+%% * fetchwhile/4           - expire messages with DLX
+%% * ackfold/4              - reject messages with DLX
+%% * requeue/2              - reject messages without DLX
+%% * drop/2                 - maxlen messages without DLX
+%% * purge/1                - issue AMQP queue.purge
+%% * purge_acks/1           - mirror queue explicit sync with unacked msgs
+%% * fold/3                 - mirror queue explicit sync
+%% * depth/1                - mirror queue implicit sync detection
+%% * len/1, is_empty/1      - info items
+%% * handle_pre_hibernate/1 - hibernation
+%%
+%% * set_ram_duration_target/2, ram_duration/1, status/1
+%%   - maybe need unit testing?
+%%
+%% [0] publish enough to get credit flow from msg store
+
+%% Publish messages with mixed priorities and check they come back in
+%% strict priority order (highest first), with and without consumer acks.
+simple_order(Config) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Q = <<"simple_order-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+    get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    publish(Ch, Q, [2, 3, 1, 2, 3, 1, 2, 3, 1]),
+    get_all(Ch, Q, no_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    publish(Ch, Q, [3, 1, 2, 3, 1, 2, 3, 1, 2]),
+    get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%% Check how published priorities map onto a queue with x-max-priority 5:
+%% priorities above the max are clamped down, absent priority uses the
+%% default, so 5 and 10 sort equal-highest and undefined sorts with 0.
+matching(Config) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Q = <<"matching-queue">>,
+    declare(Ch, Q, 5),
+    %% We round priority down, and 0 is the default
+    publish(Ch, Q, [undefined, 0, 5, 10, undefined]),
+    get_all(Ch, Q, do_ack, [5, 10, undefined, 0, undefined]),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%% Publish enough messages to trigger msg-store credit flow (exercising
+%% the BQ resume/1 path) and confirm the queue survives: the queue.purge
+%% call doubles as an existence assertion.
+resume(Config) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Q = <<"resume-queue">>,
+    declare(Ch, Q, 5),
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    publish_many(Ch, Q, 10000),
+    amqp_channel:wait_for_confirms(Ch),
+    amqp_channel:call(Ch, #'queue.purge'{queue = Q}), %% Assert it exists
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%% With an active consumer, messages go straight through without ever
+%% being queued (publish_delivered / discard BQ paths). Each published
+%% message must be delivered immediately, in both ack and no-ack modes,
+%% leaving the queue empty at the end.
+straight_through(Config) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Q = <<"straight_through-queue">>,
+    declare(Ch, Q, 3),
+    [begin
+         consume(Ch, Q, Ack),
+         [begin
+              publish1(Ch, Q, P),
+              assert_delivered(Ch, Ack, P)
+          end || P <- [1, 2, 3]],
+         cancel(Ch)
+     end || Ack <- [do_ack, no_ack]],
+    get_empty(Ch, Q),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+invoke(Config) ->
+    %% Synthetic test to check the invoke callback, as the bug tested here
+    %% is only triggered with a race condition.
+    %% When mirroring is stopped, the backing queue of rabbit_amqqueue_process
+    %% changes from rabbit_mirror_queue_master to rabbit_priority_queue,
+    %% which shouldn't receive any invoke call. However, there might
+    %% be pending messages so the priority queue receives the
+    %% `run_backing_queue` cast message sent to the old master.
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+    Q = <<"invoke-queue">>,
+    declare(Ch, Q, 3),
+    Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    %% Simulate the stray cast directly at the queue process.
+    rabbit_ct_broker_helpers:rpc(
+      Config, A, gen_server, cast,
+      [Pid,
+       {run_backing_queue, ?MODULE, fun(_, _) -> ok end}]),
+    Pid2 = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    %% Match asserts the queue process did not crash/restart (same pid).
+    Pid = Pid2,
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%% Exercise message expiry on a priority queue: dropwhile (TTL without
+%% DLX) and fetchwhile (TTL with DLX). Messages with a 1ms TTL must all
+%% be gone after the sleep, leaving the queue empty.
+dropwhile_fetchwhile(Config) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Q = <<"dropwhile_fetchwhile-queue">>,
+    [begin
+         declare(Ch, Q, Args ++ arguments(3)),
+         publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+         %% NOTE(review): 10ms is a tight margin for TTL expiry on a slow
+         %% CI host -- presumably adequate in practice, but a flake here
+         %% would point at this sleep.
+         timer:sleep(10),
+         get_empty(Ch, Q),
+         delete(Ch, Q)
+     end ||
+        Args <- [[{<<"x-message-ttl">>, long, 1}],
+                 [{<<"x-message-ttl">>,          long,    1},
+                  {<<"x-dead-letter-exchange">>, longstr, <<"amq.fanout">>}]
+                ]],
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%% Exercise the BQ ackfold/4 path: fetch all messages with manual ack,
+%% then nack them (multiple=true, requeue=false) so they are dead-lettered
+%% into Q2 via the default exchange, preserving priority order.
+ackfold(Config) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    %% Fix: queue name previously read <<"ackfolq-queue1">> (typo); name it
+    %% consistently with its sibling <<"ackfold-queue2">>. The binary is only
+    %% used locally (declare/publish/get/delete), so this is self-contained.
+    Q = <<"ackfold-queue1">>,
+    Q2 = <<"ackfold-queue2">>,
+    declare(Ch, Q,
+            [{<<"x-dead-letter-exchange">>, longstr, <<>>},
+             {<<"x-dead-letter-routing-key">>, longstr, Q2}
+             | arguments(3)]),
+    declare(Ch, Q2, none),
+    publish(Ch, Q, [1, 2, 3]),
+    %% Highest-priority message is fetched first; nack with multiple=true
+    %% on the last (lowest) delivery tag rejects all three at once.
+    [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]),
+    amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+                                        multiple     = true,
+                                        requeue      = false}),
+    %% Give dead-lettering a moment to route the messages into Q2.
+    timer:sleep(100),
+    get_all(Ch, Q2, do_ack, [3, 2, 1]),
+    delete(Ch, Q),
+    delete(Ch, Q2),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%% Exercise the BQ requeue/2 path: nack with requeue=true returns all
+%% messages to the queue, and they must still come back in priority order.
+requeue(Config) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Q = <<"requeue-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]),
+    amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+                                        multiple     = true,
+                                        requeue      = true}),
+    get_all(Ch, Q, do_ack, [3, 2, 1]),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%% Exercise the BQ drop/2 path via x-max-length: publishing 9 messages
+%% into a max-length-4 queue drops from the head (highest priority first),
+%% which is per spec even if counter-intuitive for users.
+drop(Config) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Q = <<"drop-queue">>,
+    declare(Ch, Q, [{<<"x-max-length">>, long, 4} | arguments(3)]),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+    %% We drop from the head, so this is according to the "spec" even
+    %% if not likely to be what the user wants.
+    get_all(Ch, Q, do_ack, [2, 1, 1, 1]),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%% Exercise the BQ purge/1 path: queue.purge must leave a priority queue
+%% completely empty across all priority levels.
+purge(Config) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Q = <<"purge-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    amqp_channel:call(Ch, #'queue.purge'{queue = Q}),
+    get_empty(Ch, Q),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%% Runs the unit-level check below inside the broker node, since it drives
+%% rabbit_priority_queue directly rather than going through AMQP.
+info_head_message_timestamp(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, info_head_message_timestamp1, [Config]).
+
+%% Unit test of the head_message_timestamp info item on a 2-level priority
+%% queue: the reported timestamp must track the head across priorities and
+%% must include unacknowledged messages. BQ state (BQS1..BQS6) is threaded
+%% through every call -- order of operations matters.
+info_head_message_timestamp1(_Config) ->
+    QName = rabbit_misc:r(<<"/">>, queue,
+      <<"info_head_message_timestamp-queue">>),
+    Q0 = rabbit_amqqueue:pseudo_queue(QName, self()),
+    Q = Q0#amqqueue{arguments = [{<<"x-max-priority">>, long, 2}]},
+    PQ = rabbit_priority_queue,
+    BQS1 = PQ:init(Q, new, fun(_, _) -> ok end),
+    %% The queue is empty: no timestamp.
+    true = PQ:is_empty(BQS1),
+    '' = PQ:info(head_message_timestamp, BQS1),
+    %% Publish one message with timestamp 1000.
+    Msg1 = #basic_message{
+      id = msg1,
+      content = #content{
+        properties = #'P_basic'{
+          priority = 1,
+          timestamp = 1000
+        }},
+      is_persistent = false
+    },
+    BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(),
+      noflow, BQS1),
+    1000 = PQ:info(head_message_timestamp, BQS2),
+    %% Publish a higher priority message with no timestamp.
+    Msg2 = #basic_message{
+      id = msg2,
+      content = #content{
+        properties = #'P_basic'{
+          priority = 2
+        }},
+      is_persistent = false
+    },
+    BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(),
+      noflow, BQS2),
+    '' = PQ:info(head_message_timestamp, BQS3),
+    %% Consume message with no timestamp.
+    {{Msg2, _, _}, BQS4} = PQ:fetch(false, BQS3),
+    1000 = PQ:info(head_message_timestamp, BQS4),
+    %% Consume message with timestamp 1000, but do not acknowledge it
+    %% yet. The goal is to verify that the unacknowledged message's
+    %% timestamp is returned.
+    {{Msg1, _, AckTag}, BQS5} = PQ:fetch(true, BQS4),
+    1000 = PQ:info(head_message_timestamp, BQS5),
+    %% Ack message. The queue is empty now.
+    {[msg1], BQS6} = PQ:ack([AckTag], BQS5),
+    true = PQ:is_empty(BQS6),
+    '' = PQ:info(head_message_timestamp, BQS6),
+    PQ:delete_and_terminate(a_whim, BQS6),
+    passed.
+
+%% Smoke-test ram_duration/1 and set_ram_duration_target/2 on a priority
+%% queue: just exercises the calls (no assertions on the durations), making
+%% sure the state round-trips without crashing.
+ram_duration(_Config) ->
+    QName = rabbit_misc:r(<<"/">>, queue, <<"ram_duration-queue">>),
+    Q0 = rabbit_amqqueue:pseudo_queue(QName, self()),
+    Q = Q0#amqqueue{arguments = [{<<"x-max-priority">>, long, 5}]},
+    PQ = rabbit_priority_queue,
+    BQS1 = PQ:init(Q, new, fun(_, _) -> ok end),
+    {_Duration1, BQS2} = PQ:ram_duration(BQS1),
+    BQS3 = PQ:set_ram_duration_target(infinity, BQS2),
+    BQS4 = PQ:set_ram_duration_target(1, BQS3),
+    {_Duration2, BQS5} = PQ:ram_duration(BQS4),
+    PQ:delete_and_terminate(a_whim, BQS5),
+    passed.
+
+%% Explicit mirror sync with a mix of synced, unsynced and unacked
+%% messages: master ends up with 9 messages, the slave (added by policy
+%% mid-test) with 6, and some fetched-but-unacked; sync_queue must still
+%% converge.
+mirror_queue_sync(Config) ->
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0),
+    Q = <<"mirror_queue_sync-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, 0,
+      <<"^mirror_queue_sync-queue$">>, <<"all">>),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3]),
+    %% master now has 9, slave 6.
+    get_partial(Ch, Q, manual_ack, [3, 3, 3, 2, 2, 2]),
+    %% So some but not all are unacked at the slave
+    Nodename0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    rabbit_ct_broker_helpers:control_action(sync_queue, Nodename0,
+      [binary_to_list(Q)], [{"-p", "/"}]),
+    wait_for_sync(Config, Nodename0, rabbit_misc:r(<<"/">>, queue, Q)),
+    %% NOTE(review): unlike the other cases, the queue is not deleted and
+    %% the channel not closed here -- presumably relying on per-group
+    %% teardown; confirm this is intentional.
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+mirror_queue_sync_priority_above_max(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    %% Tests synchronisation of slaves when priority is higher than max priority.
+    %% This causes an infinity loop (and test timeout) before rabbitmq-server-795
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+    Q = <<"mirror_queue_sync_priority_above_max-queue">>,
+    declare(Ch, Q, 3),
+    %% Priority 5 exceeds the declared max of 3 on purpose.
+    publish(Ch, Q, [5, 5, 5]),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+      <<".*">>, <<"all">>),
+    rabbit_ct_broker_helpers:control_action(sync_queue, A,
+      [binary_to_list(Q)], [{"-p", "/"}]),
+    wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+mirror_queue_sync_priority_above_max_pending_ack(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    %% Tests synchronisation of slaves when priority is higher than max priority
+    %% and there are pending acks.
+    %% This causes an infinity loop (and test timeout) before rabbitmq-server-795
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+    Q = <<"mirror_queue_sync_priority_above_max_pending_ack-queue">>,
+    declare(Ch, Q, 3),
+    %% Priority 5 exceeds the declared max of 3 on purpose.
+    publish(Ch, Q, [5, 5, 5]),
+    %% Consume but 'forget' to acknowledge
+    get_without_ack(Ch, Q),
+    get_without_ack(Ch, Q),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+      <<".*">>, <<"all">>),
+    rabbit_ct_broker_helpers:control_action(sync_queue, A,
+      [binary_to_list(Q)], [{"-p", "/"}]),
+    wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    %% Fix: synced_msgs/4 returns a boolean; previously the result was
+    %% discarded, so a wrong message count could never fail the test.
+    %% Assert it via pattern match on both nodes.
+    true = synced_msgs(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 3),
+    true = synced_msgs(Config, B, rabbit_misc:r(<<"/">>, queue, Q), 3),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+mirror_queue_auto_ack(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    %% Check correct use of AckRequired in the notifications to the slaves.
+    %% If slaves are notified with AckRequired == true when it is false,
+    %% the slaves will crash with the depth notification as they will not
+    %% match the master delta.
+    %% Bug rabbitmq-server 687
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+    Q = <<"mirror_queue_auto_ack-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+      <<".*">>, <<"all">>),
+    %% no_ack consumption is what generates the auto-ack notifications.
+    get_partial(Ch, Q, no_ack, [3, 2, 1]),
+
+    %% Retrieve slaves
+    SPids = slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    [{SNode1, _SPid1}, {SNode2, SPid2}] = nodes_and_pids(SPids),
+
+    %% Restart one of the slaves so `request_depth` is triggered
+    rabbit_ct_broker_helpers:restart_node(Config, SNode1),
+
+    %% The alive slave must have the same pid after its neighbour is restarted
+    timer:sleep(3000), %% ugly but we can't know when the `depth` instruction arrives
+    Slaves = nodes_and_pids(slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q))),
+    %% Match asserts SPid2 survived (it would differ if the slave crashed).
+    SPid2 = proplists:get_value(SNode2, Slaves),
+
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%% After an explicit sync and master shutdown, the promoted slave must
+%% deliver messages in priority order (msg5 at priority 3 first, then the
+%% priority-2 batch in FIFO order, then msg1 at priority 1).
+mirror_queue_sync_order(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+    {Conn2, Ch2} = rabbit_ct_client_helpers:open_connection_and_channel(Config, B),
+    Q = <<"mirror_queue_sync_order-queue">>,
+    declare(Ch, Q, 3),
+    publish_payload(Ch, Q, [{1, <<"msg1">>}, {2, <<"msg2">>},
+                            {2, <<"msg3">>}, {2, <<"msg4">>},
+                            {3, <<"msg5">>}]),
+    rabbit_ct_client_helpers:close_channel(Ch),
+
+    %% Add and sync slave
+    ok = rabbit_ct_broker_helpers:set_ha_policy(
+           Config, A, <<"^mirror_queue_sync_order-queue$">>, <<"all">>),
+    rabbit_ct_broker_helpers:control_action(sync_queue, A,
+                                            [binary_to_list(Q)], [{"-p", "/"}]),
+    wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+
+    %% Stop the master
+    rabbit_ct_broker_helpers:stop_node(Config, A),
+
+    %% Consume from the promoted slave on node B.
+    get_payload(Ch2, Q, do_ack, [<<"msg5">>, <<"msg2">>, <<"msg3">>,
+                                 <<"msg4">>, <<"msg1">>]),
+
+    delete(Ch2, Q),
+    %% Restart A so the cluster is whole again for subsequent cases.
+    rabbit_ct_broker_helpers:start_node(Config, A),
+    rabbit_ct_client_helpers:close_connection(Conn),
+    rabbit_ct_client_helpers:close_connection(Conn2),
+    passed.
+
+mirror_reset_policy(Config) ->
+    %% Gives time to the master to go through all stages.
+    %% Might eventually trigger some race conditions from #802,
+    %% although for that I would expect a longer run and higher
+    %% number of messages in the system.
+    mirror_reset_policy(Config, 5000).
+
+mirror_fast_reset_policy(Config) ->
+    %% This test seems to trigger the bug tested in invoke/1, but it
+    %% cannot guarantee it will always happen. Thus, both tests
+    %% should stay in the test suite.
+    mirror_reset_policy(Config, 5).
+
+
+%% Repeatedly apply and clear an HA policy (10 cycles, Wait ms between
+%% steps) while the queue holds 20k messages, then re-apply it and wait
+%% for 2 synchronised slaves. Matching the original queue pid at the end
+%% asserts the master never crashed during the churn.
+mirror_reset_policy(Config, Wait) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A),
+    Q = <<"mirror_reset_policy-queue">>,
+    declare(Ch, Q, 5),
+    Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    publish_many(Ch, Q, 20000),
+    [begin
+         rabbit_ct_broker_helpers:set_ha_policy(
+           Config, A, <<"^mirror_reset_policy-queue$">>, <<"all">>,
+           [{<<"ha-sync-mode">>, <<"automatic">>}]),
+         timer:sleep(Wait),
+         rabbit_ct_broker_helpers:clear_policy(
+           Config, A, <<"^mirror_reset_policy-queue$">>),
+         timer:sleep(Wait)
+     end || _ <- lists:seq(1, 10)],
+    timer:sleep(1000),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(
+           Config, A, <<"^mirror_reset_policy-queue$">>, <<"all">>,
+           [{<<"ha-sync-mode">>, <<"automatic">>}]),
+    wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 2),
+    %% Verify master has not crashed
+    Pid = queue_pid(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    delete(Ch, Q),
+
+    rabbit_ct_client_helpers:close_connection(Conn),
+    passed.
+
+%%----------------------------------------------------------------------------
+
+%% Declare a durable queue either with an explicit argument list or with
+%% just a max priority (converted via arguments/1).
+declare(Ch, Q, Args) when is_list(Args) ->
+    amqp_channel:call(Ch, #'queue.declare'{queue     = Q,
+                                           durable   = true,
+                                           arguments = Args});
+declare(Ch, Q, Max) ->
+    declare(Ch, Q, arguments(Max)).
+
+delete(Ch, Q) ->
+    amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+%% Publish one message per priority in Ps and wait for all confirms, so
+%% the messages are known to be routed before the test proceeds.
+publish(Ch, Q, Ps) ->
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    [publish1(Ch, Q, P) || P <- Ps],
+    amqp_channel:wait_for_confirms(Ch).
+
+%% Same, but with {Priority, Payload} pairs for explicit payloads.
+publish_payload(Ch, Q, PPds) ->
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    [publish1(Ch, Q, P, Pd) || {P, Pd} <- PPds],
+    amqp_channel:wait_for_confirms(Ch).
+
+%% Publish N messages with random priorities 1..5 (no confirms here; the
+%% caller selects confirm mode if needed).
+publish_many(_Ch, _Q, 0) -> ok;
+publish_many( Ch,  Q, N) -> publish1(Ch, Q, rand_compat:uniform(5)),
+                            publish_many(Ch, Q, N - 1).
+
+%% Single publish; the payload encodes the priority for later assertions.
+publish1(Ch, Q, P) ->
+    amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props   = props(P),
+                                payload = priority2bin(P)}).
+
+publish1(Ch, Q, P, Pd) ->
+    amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props   = props(P),
+                                payload = Pd}).
+
+%% Persistent (delivery_mode 2) properties; 'undefined' omits priority.
+props(undefined) -> #'P_basic'{delivery_mode = 2};
+props(P)         -> #'P_basic'{priority      = P,
+                               delivery_mode = 2}.
+
+%% Start a consumer with a fixed ctag and block until the broker confirms
+%% it; Ack selects no_ack vs manual acking.
+consume(Ch, Q, Ack) ->
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue        = Q,
+                                                no_ack       = Ack =:= no_ack,
+                                                consumer_tag = <<"ctag">>},
+                           self()),
+    receive
+        #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+             ok
+    end.
+
+cancel(Ch) ->
+    amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}).
+
+%% Block until a delivery arrives and assert its payload matches priority
+%% P (pattern-match on PBin), acking if requested.
+assert_delivered(Ch, Ack, P) ->
+    PBin = priority2bin(P),
+    receive
+        {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} ->
+            PBin = PBin2,
+            maybe_ack(Ch, Ack, DTag)
+    end.
+
+%% Fetch exactly the expected priorities in order, then assert the queue
+%% is empty. Returns the delivery tags.
+get_all(Ch, Q, Ack, Ps) ->
+    DTags = get_partial(Ch, Q, Ack, Ps),
+    get_empty(Ch, Q),
+    DTags.
+
+%% Fetch the expected priorities in order but allow the queue to retain
+%% further messages.
+get_partial(Ch, Q, Ack, Ps) ->
+    [get_ok(Ch, Q, Ack, priority2bin(P)) || P <- Ps].
+
+get_empty(Ch, Q) ->
+    #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+%% basic.get one message and assert its payload equals PBin; ack per Ack.
+get_ok(Ch, Q, Ack, PBin) ->
+    {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} =
+        amqp_channel:call(Ch, #'basic.get'{queue  = Q,
+                                           no_ack = Ack =:= no_ack}),
+    PBin = PBin2,
+    maybe_ack(Ch, Ack, DTag).
+
+%% Like get_partial/4 but asserting raw payloads rather than priorities.
+get_payload(Ch, Q, Ack, Ps) ->
+    [get_ok(Ch, Q, Ack, P) || P <- Ps].
+
+%% Fetch one message in manual-ack mode and deliberately never ack it.
+get_without_ack(Ch, Q) ->
+    {#'basic.get_ok'{}, _} =
+        amqp_channel:call(Ch, #'basic.get'{queue  = Q, no_ack = false}).
+
+%% Ack only in do_ack mode; always return the delivery tag.
+maybe_ack(Ch, do_ack, DTag) ->
+    amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+    DTag;
+maybe_ack(_Ch, _, DTag) ->
+    DTag.
+
+%% Queue-declare arguments enabling priorities up to MaxPriority;
+%% 'none' yields a plain (non-priority) queue.
+arguments(MaxPriority) ->
+    case MaxPriority of
+        none -> [];
+        Max  -> [{<<"x-max-priority">>, byte, Max}]
+    end.
+
+%% Encode a priority as the message payload used by the assertions above.
+priority2bin(undefined) -> <<"undefined">>;
+priority2bin(Priority)  -> integer_to_binary(Priority).
+
+%%----------------------------------------------------------------------------
+
+%% Poll (100ms interval, up to 600 tries = ~60s) until queue Q reports
+%% the expected number of synchronised slave pids; throws sync_timeout
+%% on exhaustion.
+wait_for_sync(Config, Nodename, Q) ->
+    wait_for_sync(Config, Nodename, Q, 1).
+
+wait_for_sync(Config, Nodename, Q, Nodes) ->
+    wait_for_sync(Config, Nodename, Q, Nodes, 600).
+
+wait_for_sync(_, _, _, _, 0) ->
+    throw(sync_timeout);
+wait_for_sync(Config, Nodename, Q, Nodes, N) ->
+    case synced(Config, Nodename, Q, Nodes) of
+        true  -> ok;
+        false -> timer:sleep(100),
+                 wait_for_sync(Config, Nodename, Q, Nodes, N-1)
+    end.
+
+%% True when queue Q has exactly Nodes synchronised slave pids, as
+%% reported by rabbit_amqqueue:info_all/2 on the given node.
+synced(Config, Nodename, Q, Nodes) ->
+    Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+      rabbit_amqqueue, info_all, [<<"/">>, [name, synchronised_slave_pids]]),
+    [SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info,
+                        Q =:= Q1],
+    length(SSPids) =:= Nodes.
+
+%% True when queue Q holds exactly Expected messages, as seen from the
+%% given node. NOTE(review): this returns a boolean -- callers must
+%% pattern-match the result (e.g. `true = synced_msgs(...)`) or the check
+%% is silently discarded.
+synced_msgs(Config, Nodename, Q, Expected) ->
+    Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+      rabbit_amqqueue, info_all, [<<"/">>, [name, messages]]),
+    [M] = [M || [{name, Q1}, {messages, M}] <- Info, Q =:= Q1],
+    M =:= Expected.
+
+%% Pair each slave pid with the node it lives on.
+nodes_and_pids(SPids) ->
+    lists:zip([node(S) || S <- SPids], SPids).
+
+%% Slave pids of queue Q, as reported by the given node.
+slave_pids(Config, Nodename, Q) ->
+    Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+      rabbit_amqqueue, info_all, [<<"/">>, [name, slave_pids]]),
+    [SPids] = [SPids || [{name, Q1}, {slave_pids, SPids}] <- Info,
+                        Q =:= Q1],
+    SPids.
+
+%% Master (queue process) pid of queue Q, as reported by the given node.
+queue_pid(Config, Nodename, Q) ->
+    Info = rabbit_ct_broker_helpers:rpc(
+             Config, Nodename,
+             rabbit_amqqueue, info_all, [<<"/">>, [name, pid]]),
+    [Pid] = [P || [{name, Q1}, {pid, P}] <- Info, Q =:= Q1],
+    Pid.
+
+%%----------------------------------------------------------------------------
diff --git a/rabbitmq-server/test/priority_queue_recovery_SUITE.erl b/rabbitmq-server/test/priority_queue_recovery_SUITE.erl
new file mode 100644 (file)
index 0000000..9e2ffbd
--- /dev/null
@@ -0,0 +1,153 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(priority_queue_recovery_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+    [
+      {group, non_parallel_tests}
+    ].
+
+groups() ->
+    [
+     {non_parallel_tests, [], [
+                               recovery %% Restart RabbitMQ.
+                              ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% Two-node cluster so recovery can be verified from the other node
+%% while node 0 is restarted.
+init_per_group(_, Config) ->
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 2}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_group(_, Config) ->
+    rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()).
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Durable priority queue with persistent messages must survive a broker
+%% restart: publish on node 0, restart it, then read everything back (in
+%% priority order) through node 1.
+recovery(Config) ->
+    {Conn, Ch} = open(Config),
+    Q = <<"recovery-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    rabbit_ct_client_helpers:close_connection(Conn),
+
+    rabbit_ct_broker_helpers:restart_broker(Config, 0),
+
+    {Conn2, Ch2} = open(Config, 1),
+    get_all(Ch2, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    delete(Ch2, Q),
+    rabbit_ct_client_helpers:close_channel(Ch2),
+    rabbit_ct_client_helpers:close_connection(Conn2),
+    passed.
+
+
+%%----------------------------------------------------------------------------
+
+%% Helpers below mirror those in priority_queue_SUITE so this suite is
+%% self-contained.
+
+open(Config) ->
+    open(Config, 0).
+
+open(Config, NodeIndex) ->
+    rabbit_ct_client_helpers:open_connection_and_channel(Config, NodeIndex).
+
+%% Declare a durable queue from an explicit argument list or a max priority.
+declare(Ch, Q, Args) when is_list(Args) ->
+    amqp_channel:call(Ch, #'queue.declare'{queue     = Q,
+                                           durable   = true,
+                                           arguments = Args});
+declare(Ch, Q, Max) ->
+    declare(Ch, Q, arguments(Max)).
+
+delete(Ch, Q) ->
+    amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+%% Publish one message per priority and wait for publisher confirms.
+publish(Ch, Q, Ps) ->
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    [publish1(Ch, Q, P) || P <- Ps],
+    amqp_channel:wait_for_confirms(Ch).
+
+publish1(Ch, Q, P) ->
+    amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props   = props(P),
+                                payload = priority2bin(P)}).
+
+%% NOTE(review): publish1/4 appears unused in this suite (copied from
+%% priority_queue_SUITE) -- consider removing upstream.
+publish1(Ch, Q, P, Pd) ->
+    amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props   = props(P),
+                                payload = Pd}).
+
+%% Fetch exactly the expected priorities then assert the queue is empty.
+get_all(Ch, Q, Ack, Ps) ->
+    DTags = get_partial(Ch, Q, Ack, Ps),
+    get_empty(Ch, Q),
+    DTags.
+
+get_partial(Ch, Q, Ack, Ps) ->
+    [get_ok(Ch, Q, Ack, priority2bin(P)) || P <- Ps].
+
+get_empty(Ch, Q) ->
+    #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+%% basic.get one message and assert its payload equals PBin.
+get_ok(Ch, Q, Ack, PBin) ->
+    {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} =
+        amqp_channel:call(Ch, #'basic.get'{queue  = Q,
+                                           no_ack = Ack =:= no_ack}),
+    PBin = PBin2,
+    maybe_ack(Ch, Ack, DTag).
+
+maybe_ack(Ch, do_ack, DTag) ->
+    amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+    DTag;
+maybe_ack(_Ch, _, DTag) ->
+    DTag.
+
+arguments(none) -> [];
+arguments(Max)  -> [{<<"x-max-priority">>, byte, Max}].
+
+priority2bin(undefined) -> <<"undefined">>;
+priority2bin(Int)       -> list_to_binary(integer_to_list(Int)).
+
+%% Persistent (delivery_mode 2) properties; 'undefined' omits priority.
+props(undefined) -> #'P_basic'{delivery_mode = 2};
+props(P)         -> #'P_basic'{priority      = P,
+                               delivery_mode = 2}.
diff --git a/rabbitmq-server/test/queue_master_location_SUITE.erl b/rabbitmq-server/test/queue_master_location_SUITE.erl
new file mode 100644 (file)
index 0000000..e77f27f
--- /dev/null
@@ -0,0 +1,271 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(queue_master_location_SUITE).
+
+%% These tests use an ABC cluster with each node initialised with
+%% a different number of queues. When a queue is declared, different
+%% strategies can be applied to determine the queue's master node. Queue
+%% location strategies can be applied in the following ways:
+%%   1. As policy,
+%%   2. As config (in rabbitmq.config),
+%%   3. or as part of the queue's declare arguments.
+%%
+%% Currently supported strategies are:
+%%   min-masters : The queue master node is calculated as the one with the
+%%                 least bound queues in the cluster.
+%%   client-local: The queue master node is the local node from which
+%%                 the declaration is being carried out.
+%%   random      : The queue master node is randomly selected.
+%%
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(DEFAULT_VHOST_PATH, (<<"/">>)).
+-define(POLICY, <<"^qm.location$">>).
+
+all() ->
+    [
+      {group, cluster_size_3}
+    ].
+
+groups() ->
+    [
+      {cluster_size_3, [], [
+          declare_args,
+          declare_policy,
+          declare_config,
+          calculate_min_master,
+          calculate_random,
+          calculate_client_local
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_3, Config) ->
+    rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 3} %% Replaced with a list of node names later.
+      ]).
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = ?config(rmq_nodes_count, Config),
+    Nodenames = [
+      list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+      || I <- lists:seq(1, ClusterSize)
+    ],
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, Nodenames},
+        {rmq_nodes_clustered, true},
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%%
+%% Queue 'declarations'
+%%
+
+declare_args(Config) ->
+    setup_test_environment(Config),
+    unset_location_config(Config),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    Args = [{<<"x-queue-master-locator">>, <<"min-masters">>}],
+    declare(Config, QueueName, false, false, Args, none),
+    verify_min_master(Config, Q).
+
+declare_policy(Config) ->
+    setup_test_environment(Config),
+    unset_location_config(Config),
+    set_location_policy(Config, ?POLICY, <<"min-masters">>),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    declare(Config, QueueName, false, false, _Args=[], none),
+    verify_min_master(Config, Q).
+
+declare_config(Config) ->
+    setup_test_environment(Config),
+    set_location_config(Config, <<"min-masters">>),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    declare(Config, QueueName, false, false, _Args=[], none),
+    verify_min_master(Config, Q),
+    unset_location_config(Config),
+    ok.
+
+%%
+%% Test 'calculations'
+%%
+
+calculate_min_master(Config) ->
+    setup_test_environment(Config),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    Args = [{<<"x-queue-master-locator">>, <<"min-masters">>}],
+    declare(Config, QueueName, false, false, Args, none),
+    verify_min_master(Config, Q),
+    ok.
+
+calculate_random(Config) ->
+    setup_test_environment(Config),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    Args = [{<<"x-queue-master-locator">>, <<"random">>}],
+    declare(Config, QueueName, false, false, Args, none),
+    verify_random(Config, Q),
+    ok.
+
+calculate_client_local(Config) ->
+    setup_test_environment(Config),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    Args = [{<<"x-queue-master-locator">>, <<"client-local">>}],
+    declare(Config, QueueName, false, false, Args, none),
+    verify_client_local(Config, Q),
+    ok.
+
+%%
+%% Setup environment
+%%
+
+setup_test_environment(Config)  ->
+    Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    [distribute_queues(Config, Node) || Node <- Nodes],
+    ok.
+
+distribute_queues(Config, Node) ->
+    ok  = rpc:call(Node, application, unset_env, [rabbit, queue_master_location]),
+    Count = case rabbit_ct_broker_helpers:nodename_to_index(Config, Node) of
+        0 -> 15;
+        1 -> 8;
+        2 -> 1
+    end,
+
+    Channel = rabbit_ct_client_helpers:open_channel(Config, Node),
+    ok = declare_queues(Channel, declare_fun(), Count),
+    ok = create_e2e_binding(Channel, [<< "ex_1" >>, << "ex_2" >>]),
+    {ok, Channel}.
+
+%%
+%% Internal queue handling
+%%
+
+declare_queues(Channel, DeclareFun, 1) -> DeclareFun(Channel);
+declare_queues(Channel, DeclareFun, N) ->
+    DeclareFun(Channel),
+    declare_queues(Channel, DeclareFun, N-1).
+
+declare_exchange(Channel, Ex) ->
+    #'exchange.declare_ok'{} =
+        amqp_channel:call(Channel, #'exchange.declare'{exchange = Ex}),
+    {ok, Ex}.
+
+declare_binding(Channel, Binding) ->
+    #'exchange.bind_ok'{} = amqp_channel:call(Channel, Binding),
+    ok.
+
+declare_fun() ->
+    fun(Channel) ->
+            #'queue.declare_ok'{} = amqp_channel:call(Channel, get_random_queue_declare()),
+            ok
+    end.
+
+create_e2e_binding(Channel, ExNamesBin) ->
+    [{ok, Ex1}, {ok, Ex2}] = [declare_exchange(Channel, Ex) || Ex <- ExNamesBin],
+    Binding = #'exchange.bind'{source = Ex1, destination = Ex2},
+    ok = declare_binding(Channel, Binding).
+
+get_random_queue_declare() ->
+    #'queue.declare'{passive     = false,
+                     durable     = false,
+                     exclusive   = true,
+                     auto_delete = false,
+                     nowait      = false,
+                     arguments   = []}.
+
+%%
+%% Internal helper functions
+%%
+
+get_cluster() -> [node()|nodes()].
+
+min_master_node(Config) ->
+    hd(lists:reverse(
+        rabbit_ct_broker_helpers:get_node_configs(Config, nodename))).
+
+set_location_config(Config, Strategy) ->
+    Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    [ok = rpc:call(Node, application, set_env,
+                   [rabbit, queue_master_locator, Strategy]) || Node <- Nodes],
+    ok.
+
+unset_location_config(Config) ->
+    Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    [ok = rpc:call(Node, application, unset_env,
+                   [rabbit, queue_master_locator]) || Node <- Nodes],
+    ok.
+
+declare(Config, QueueName, Durable, AutoDelete, Args, Owner) ->
+    Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    {new, Queue} = rpc:call(Node, rabbit_amqqueue, declare,
+                            [QueueName, Durable, AutoDelete, Args, Owner]),
+    Queue.
+
+verify_min_master(Config, Q) ->
+    Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    MinMaster = min_master_node(Config),
+    {ok, MinMaster} = rpc:call(Node, rabbit_queue_master_location_misc,
+                               lookup_master, [Q, ?DEFAULT_VHOST_PATH]).
+
+verify_random(Config, Q) ->
+    [Node | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    {ok, Master} = rpc:call(Node, rabbit_queue_master_location_misc,
+                            lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
+    true = lists:member(Master, Nodes).
+
+verify_client_local(Config, Q) ->
+    Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    {ok, Node} = rpc:call(Node, rabbit_queue_master_location_misc,
+                          lookup_master, [Q, ?DEFAULT_VHOST_PATH]).
+
+set_location_policy(Config, Name, Strategy) ->
+    ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
+      Name, <<".*">>, <<"queues">>, [{<<"queue-master-locator">>, Strategy}]).
diff --git a/rabbitmq-server/test/rabbit_ha_test_consumer.erl b/rabbitmq-server/test/rabbit_ha_test_consumer.erl
new file mode 100644 (file)
index 0000000..f374863
--- /dev/null
@@ -0,0 +1,114 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+-module(rabbit_ha_test_consumer).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-export([await_response/1, create/5, start/6]).
+
+await_response(ConsumerPid) ->
+    case receive {ConsumerPid, Response} -> Response end of
+        {error, Reason}  -> erlang:error(Reason);
+        ok               -> ok
+    end.
+
+create(Channel, Queue, TestPid, CancelOnFailover, ExpectingMsgs) ->
+    ConsumerPid = spawn_link(?MODULE, start,
+                             [TestPid, Channel, Queue, CancelOnFailover,
+                              ExpectingMsgs + 1, ExpectingMsgs]),
+    amqp_channel:subscribe(
+      Channel, consume_method(Queue, CancelOnFailover), ConsumerPid),
+    ConsumerPid.
+
+start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
+    error_logger:info_msg("consumer ~p on ~p awaiting ~w messages "
+                          "(lowest seen = ~w, cancel-on-failover = ~w)~n",
+                          [self(), Channel, MsgsToConsume, LowestSeen,
+                           CancelOnFailover]),
+    run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
+
+run(TestPid, _Channel, _Queue, _CancelOnFailover, _LowestSeen, 0) ->
+    consumer_reply(TestPid, ok);
+run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
+    receive
+        #'basic.consume_ok'{} ->
+            run(TestPid, Channel, Queue,
+                CancelOnFailover, LowestSeen, MsgsToConsume);
+        {Delivery = #'basic.deliver'{ redelivered = Redelivered },
+         #amqp_msg{payload = Payload}} ->
+            MsgNum = list_to_integer(binary_to_list(Payload)),
+
+            ack(Delivery, Channel),
+
+            %% we can receive any message we've already seen and,
+            %% because of the possibility of multiple requeuings, we
+            %% might see these messages in any order. If we are seeing
+            %% a message again, we don't decrement the MsgsToConsume
+            %% counter.
+            if
+                MsgNum + 1 == LowestSeen ->
+                    run(TestPid, Channel, Queue,
+                        CancelOnFailover, MsgNum, MsgsToConsume - 1);
+                MsgNum >= LowestSeen ->
+                    error_logger:info_msg(
+                      "consumer ~p on ~p ignoring redeliverd msg ~p~n",
+                      [self(), Channel, MsgNum]),
+                    true = Redelivered, %% ASSERTION
+                    run(TestPid, Channel, Queue,
+                        CancelOnFailover, LowestSeen, MsgsToConsume);
+                true ->
+                    %% We received a message we haven't seen before,
+                    %% but it is not the next message in the expected
+                    %% sequence.
+                    consumer_reply(TestPid,
+                                   {error, {unexpected_message, MsgNum}})
+            end;
+        #'basic.cancel'{} when CancelOnFailover ->
+            error_logger:info_msg("consumer ~p on ~p received basic.cancel: "
+                                  "resubscribing to ~p on ~p~n",
+                                  [self(), Channel, Queue, Channel]),
+            resubscribe(TestPid, Channel, Queue, CancelOnFailover,
+                        LowestSeen, MsgsToConsume);
+        #'basic.cancel'{} ->
+            exit(cancel_received_without_cancel_on_failover)
+    end.
+
+%%
+%% Private API
+%%
+
+resubscribe(TestPid, Channel, Queue, CancelOnFailover, LowestSeen,
+            MsgsToConsume) ->
+    amqp_channel:subscribe(
+      Channel, consume_method(Queue, CancelOnFailover), self()),
+    ok = receive #'basic.consume_ok'{} -> ok
+         end,
+    error_logger:info_msg("re-subscripting consumer ~p on ~p complete "
+                          "(received basic.consume_ok)",
+                          [self(), Channel]),
+    start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
+
+consume_method(Queue, CancelOnFailover) ->
+    Args = [{<<"x-cancel-on-ha-failover">>, bool, CancelOnFailover}],
+    #'basic.consume'{queue     = Queue,
+                     arguments = Args}.
+
+ack(#'basic.deliver'{delivery_tag = DeliveryTag}, Channel) ->
+    amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
+    ok.
+
+consumer_reply(TestPid, Reply) ->
+    TestPid ! {self(), Reply}.
diff --git a/rabbitmq-server/test/rabbit_ha_test_producer.erl b/rabbitmq-server/test/rabbit_ha_test_producer.erl
new file mode 100644 (file)
index 0000000..66dee3f
--- /dev/null
@@ -0,0 +1,119 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+-module(rabbit_ha_test_producer).
+
+-export([await_response/1, start/5, create/5]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+await_response(ProducerPid) ->
+    error_logger:info_msg("waiting for producer pid ~p~n", [ProducerPid]),
+    case receive {ProducerPid, Response} -> Response end of
+        ok                -> ok;
+        {error, _} = Else -> exit(Else);
+        Else              -> exit({weird_response, Else})
+    end.
+
+create(Channel, Queue, TestPid, Confirm, MsgsToSend) ->
+    ProducerPid = spawn_link(?MODULE, start, [Channel, Queue, TestPid,
+                                              Confirm, MsgsToSend]),
+    receive
+        {ProducerPid, started} -> ProducerPid
+    end.
+
+start(Channel, Queue, TestPid, Confirm, MsgsToSend) ->
+    ConfirmState =
+        case Confirm of
+            true  -> amqp_channel:register_confirm_handler(Channel, self()),
+                     #'confirm.select_ok'{} =
+                         amqp_channel:call(Channel, #'confirm.select'{}),
+                     gb_trees:empty();
+            false -> none
+        end,
+    TestPid ! {self(), started},
+    error_logger:info_msg("publishing ~w msgs on ~p~n", [MsgsToSend, Channel]),
+    producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend).
+
+%%
+%% Private API
+%%
+
+producer(_Channel, _Queue, TestPid, none, 0) ->
+    TestPid ! {self(), ok};
+producer(Channel, _Queue, TestPid, ConfirmState, 0) ->
+    error_logger:info_msg("awaiting confirms on channel ~p~n", [Channel]),
+    Msg = case drain_confirms(no_nacks, ConfirmState) of
+              no_nacks    -> ok;
+              nacks       -> {error, received_nacks};
+              {Nacks, CS} -> {error, {missing_confirms, Nacks,
+                                      lists:sort(gb_trees:keys(CS))}}
+          end,
+    TestPid ! {self(), Msg};
+
+producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend) ->
+    Method = #'basic.publish'{exchange    = <<"">>,
+                              routing_key = Queue,
+                              mandatory   = false,
+                              immediate   = false},
+
+    ConfirmState1 = maybe_record_confirm(ConfirmState, Channel, MsgsToSend),
+
+    amqp_channel:call(Channel, Method,
+                      #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+                                payload = list_to_binary(
+                                            integer_to_list(MsgsToSend))}),
+
+    producer(Channel, Queue, TestPid, ConfirmState1, MsgsToSend - 1).
+
+maybe_record_confirm(none, _, _) ->
+    none;
+maybe_record_confirm(ConfirmState, Channel, MsgsToSend) ->
+    SeqNo = amqp_channel:next_publish_seqno(Channel),
+    gb_trees:insert(SeqNo, MsgsToSend, ConfirmState).
+
+drain_confirms(Nacks, ConfirmState) ->
+    case gb_trees:is_empty(ConfirmState) of
+        true  -> Nacks;
+        false -> receive
+                     #'basic.ack'{delivery_tag = DeliveryTag,
+                                  multiple     = IsMulti} ->
+                         drain_confirms(Nacks,
+                                        delete_confirms(DeliveryTag, IsMulti,
+                                                        ConfirmState));
+                     #'basic.nack'{delivery_tag = DeliveryTag,
+                                   multiple     = IsMulti} ->
+                         drain_confirms(nacks,
+                                        delete_confirms(DeliveryTag, IsMulti,
+                                                        ConfirmState))
+                 after
+                     60000 -> {Nacks, ConfirmState}
+                 end
+    end.
+
+delete_confirms(DeliveryTag, false, ConfirmState) ->
+    gb_trees:delete(DeliveryTag, ConfirmState);
+delete_confirms(DeliveryTag, true, ConfirmState) ->
+    multi_confirm(DeliveryTag, ConfirmState).
+
+multi_confirm(DeliveryTag, ConfirmState) ->
+    case gb_trees:is_empty(ConfirmState) of
+        true  -> ConfirmState;
+        false -> {Key, _, ConfirmState1} = gb_trees:take_smallest(ConfirmState),
+                 case Key =< DeliveryTag of
+                     true  -> multi_confirm(DeliveryTag, ConfirmState1);
+                     false -> ConfirmState
+                 end
+    end.
diff --git a/rabbitmq-server/test/simple_ha_SUITE.erl b/rabbitmq-server/test/simple_ha_SUITE.erl
new file mode 100644 (file)
index 0000000..af85ad6
--- /dev/null
@@ -0,0 +1,216 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(simple_ha_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+    [
+      {group, cluster_size_2},
+      {group, cluster_size_3}
+    ].
+
+groups() ->
+    [
+      {cluster_size_2, [], [
+          rapid_redeclare,
+          declare_synchrony
+        ]},
+      {cluster_size_3, [], [
+          consume_survives_stop,
+          consume_survives_sigkill,
+          consume_survives_policy,
+          auto_resume,
+          auto_resume_no_ccn_client,
+          confirms_survive_stop,
+          confirms_survive_sigkill,
+          confirms_survive_policy
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+    rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 2}
+      ]);
+init_per_group(cluster_size_3, Config) ->
+    rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 3}
+      ]).
+
+end_per_group(_, Config) ->
+    Config.
+
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = ?config(rmq_nodes_count, Config),
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_clustered, true},
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps() ++ [
+        fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+      ]).
+
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+rapid_redeclare(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+    Queue = <<"test">>,
+    [begin
+         amqp_channel:call(Ch, #'queue.declare'{queue   = Queue,
+                                                durable = true}),
+         amqp_channel:call(Ch, #'queue.delete'{queue  = Queue})
+     end || _I <- lists:seq(1, 20)],
+    ok.
+
+%% Check that by the time we get a declare-ok back, the slaves are up
+%% and in Mnesia.
+declare_synchrony(Config) ->
+    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    RabbitCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+    HareCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
+    Q = <<"mirrored-queue">>,
+    declare(RabbitCh, Q),
+    amqp_channel:call(RabbitCh, #'confirm.select'{}),
+    amqp_channel:cast(RabbitCh, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
+    amqp_channel:wait_for_confirms(RabbitCh),
+    rabbit_ct_broker_helpers:kill_node(Config, Rabbit),
+
+    #'queue.declare_ok'{message_count = 1} = declare(HareCh, Q),
+    ok.
+
+declare(Ch, Name) ->
+    amqp_channel:call(Ch, #'queue.declare'{durable = true, queue = Name}).
+
+consume_survives_stop(Cf)     -> consume_survives(Cf, fun stop/2,    true).
+consume_survives_sigkill(Cf)  -> consume_survives(Cf, fun sigkill/2, true).
+consume_survives_policy(Cf)   -> consume_survives(Cf, fun policy/2,  true).
+auto_resume(Cf)               -> consume_survives(Cf, fun sigkill/2, false).
+auto_resume_no_ccn_client(Cf) -> consume_survives(Cf, fun sigkill/2, false,
+                                                  false).
+
+confirms_survive_stop(Cf)    -> confirms_survive(Cf, fun stop/2).
+confirms_survive_sigkill(Cf) -> confirms_survive(Cf, fun sigkill/2).
+confirms_survive_policy(Cf)  -> confirms_survive(Cf, fun policy/2).
+
+%%----------------------------------------------------------------------------
+
+consume_survives(Config, DeathFun, CancelOnFailover) ->
+    consume_survives(Config, DeathFun, CancelOnFailover, true).
+
+consume_survives(Config,
+                 DeathFun, CancelOnFailover, CCNSupported) ->
+    [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+    Channel1 = rabbit_ct_client_helpers:open_channel(Config, A),
+    Channel2 = rabbit_ct_client_helpers:open_channel(Config, B),
+    Channel3 = rabbit_ct_client_helpers:open_channel(Config, C),
+
+    %% declare the queue on the master, mirrored to the two slaves
+    Queue = <<"test">>,
+    amqp_channel:call(Channel1, #'queue.declare'{queue       = Queue,
+                                                 auto_delete = false}),
+
+    %% start up a consumer
+    ConsCh = case CCNSupported of
+                 true  -> Channel2;
+                 false -> Port = rabbit_ct_broker_helpers:get_node_config(
+                            Config, B, tcp_port_amqp),
+                          open_incapable_channel(Port)
+             end,
+    ConsumerPid = rabbit_ha_test_consumer:create(
+                    ConsCh, Queue, self(), CancelOnFailover, Msgs),
+
+    %% send a bunch of messages from the producer
+    ProducerPid = rabbit_ha_test_producer:create(Channel3, Queue,
+                                                 self(), false, Msgs),
+    DeathFun(Config, A),
+    %% verify that the consumer got all msgs, or die - the await_response
+    %% calls throw an exception if anything goes wrong....
+    rabbit_ha_test_consumer:await_response(ConsumerPid),
+    rabbit_ha_test_producer:await_response(ProducerPid),
+    ok.
+
+confirms_survive(Config, DeathFun) ->
+    [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+    Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A),
+    Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B),
+
+    %% declare the queue on the master, mirrored to the two slaves
+    Queue = <<"test">>,
+    amqp_channel:call(Node1Channel,#'queue.declare'{queue       = Queue,
+                                                    auto_delete = false,
+                                                    durable     = true}),
+
+    %% send a bunch of messages from the producer
+    ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue,
+                                                 self(), true, Msgs),
+    DeathFun(Config, A),
+    rabbit_ha_test_producer:await_response(ProducerPid),
+    ok.
+
+stop(Config, Node) ->
+    rabbit_ct_broker_helpers:stop_node_after(Config, Node, 50).
+
+sigkill(Config, Node) ->
+    rabbit_ct_broker_helpers:kill_node_after(Config, Node, 50).
+
+policy(Config, Node)->
+    Nodes = [
+      rabbit_misc:atom_to_binary(N)
+      || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+         N =/= Node],
+    rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>,
+      {<<"nodes">>, Nodes}).
+
+open_incapable_channel(NodePort) ->
+    Props = [{<<"capabilities">>, table, []}],
+    {ok, ConsConn} =
+        amqp_connection:start(#amqp_params_network{port              = NodePort,
+                                                   client_properties = Props}),
+    {ok, Ch} = amqp_connection:open_channel(ConsConn),
+    Ch.
diff --git a/rabbitmq-server/test/sup_delayed_restart_SUITE.erl b/rabbitmq-server/test/sup_delayed_restart_SUITE.erl
new file mode 100644 (file)
index 0000000..e495f57
--- /dev/null
@@ -0,0 +1,91 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(sup_delayed_restart_SUITE).
+
+-behaviour(supervisor2).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+all() ->
+    [
+      delayed_restart
+    ].
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+delayed_restart(_Config) ->
+    passed = with_sup(simple_one_for_one,
+                      fun (SupPid) ->
+                              {ok, _ChildPid} =
+                                  supervisor2:start_child(SupPid, []),
+                              test_supervisor_delayed_restart(SupPid)
+                      end),
+    passed = with_sup(one_for_one, fun test_supervisor_delayed_restart/1).
+
+test_supervisor_delayed_restart(SupPid) ->
+    ok = ping_child(SupPid),
+    ok = exit_child(SupPid),
+    timer:sleep(100),
+    ok = ping_child(SupPid),
+    ok = exit_child(SupPid),
+    timer:sleep(100),
+    timeout = ping_child(SupPid),
+    timer:sleep(1010),
+    ok = ping_child(SupPid),
+    passed.
+
+with_sup(RestartStrategy, Fun) ->
+    {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]),
+    Res = Fun(SupPid),
+    unlink(SupPid),
+    exit(SupPid, shutdown),
+    Res.
+
+init([RestartStrategy]) ->
+    {ok, {{RestartStrategy, 1, 1},
+          [{test, {?MODULE, start_child, []}, {permanent, 1},
+            16#ffffffff, worker, [?MODULE]}]}}.
+
+start_child() ->
+    {ok, proc_lib:spawn_link(fun run_child/0)}.
+
+ping_child(SupPid) ->
+    Ref = make_ref(),
+    with_child_pid(SupPid, fun(ChildPid) -> ChildPid ! {ping, Ref, self()} end),
+    receive {pong, Ref} -> ok
+    after 1000          -> timeout
+    end.
+
+exit_child(SupPid) ->
+    with_child_pid(SupPid, fun(ChildPid) -> exit(ChildPid, abnormal) end),
+    ok.
+
+with_child_pid(SupPid, Fun) ->
+    case supervisor2:which_children(SupPid) of
+        [{_Id, undefined, worker, [?MODULE]}] -> ok;
+        [{_Id,  ChildPid, worker, [?MODULE]}] -> Fun(ChildPid);
+        []                                     -> ok
+    end.
+
+run_child() ->
+    receive {ping, Ref, Pid} -> Pid ! {pong, Ref},
+                                run_child()
+    end.
diff --git a/rabbitmq-server/test/sync_detection_SUITE.erl b/rabbitmq-server/test/sync_detection_SUITE.erl
new file mode 100644 (file)
index 0000000..1e0a66e
--- /dev/null
@@ -0,0 +1,252 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(sync_detection_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(LOOP_RECURSION_DELAY, 100).
+
+%% Testcases are grouped by the cluster size they require (configured in
+%% init_per_group/2 below).
+all() ->
+    [
+      {group, cluster_size_2},
+      {group, cluster_size_3}
+    ].
+
+%% The TTL variant needs a third node, used to host the dead-letter queue.
+groups() ->
+    [
+      {cluster_size_2, [], [
+          slave_synchronization
+        ]},
+      {cluster_size_3, [], [
+          slave_synchronization_ttl
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Standard rabbit_ct_helpers suite setup/teardown.
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% Only record the required cluster size here; the brokers themselves are
+%% started per testcase in init_per_testcase/2.
+init_per_group(cluster_size_2, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]).
+
+end_per_group(_, Config) ->
+    Config.
+
+%% Starts a fresh, clustered set of brokers for every testcase.  TCP ports
+%% are offset by the testcase number so concurrent suite runs do not
+%% collide, and the "ha two" mirroring policies used by the testcases are
+%% installed as extra setup steps.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = ?config(rmq_nodes_count, Config),
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, ClusterSize},
+        {rmq_nodes_clustered, true},
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps() ++ [
+        fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1,
+        fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1
+      ]).
+
+%% Tears the per-testcase client connections and brokers down again.
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Checks that a restarted slave of a mirrored queue is only reported as
+%% synchronised once the master holds no messages (or pending acks) that
+%% the slave missed while it was down.
+slave_synchronization(Config) ->
+    [Master, Slave] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    Channel = rabbit_ct_client_helpers:open_channel(Config, Master),
+    Queue = <<"ha.two.test">>,
+    #'queue.declare_ok'{} =
+        amqp_channel:call(Channel, #'queue.declare'{queue       = Queue,
+                                                    auto_delete = false}),
+
+    %% The comments on the right are the queue length and the pending acks on
+    %% the master.
+    rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+
+    %% We get and ack one message when the slave is down, and check that when we
+    %% start the slave it's not marked as synced until we ack the message.  We
+    %% also publish another message when the slave is up.
+    send_dummy_message(Channel, Queue),                                 % 1 - 0
+    {#'basic.get_ok'{delivery_tag = Tag1}, _} =
+        amqp_channel:call(Channel, #'basic.get'{queue = Queue}),        % 0 - 1
+
+    rabbit_ct_broker_helpers:start_broker(Config, Slave),
+
+    slave_unsynced(Master, Queue),
+    send_dummy_message(Channel, Queue),                                 % 1 - 1
+    slave_unsynced(Master, Queue),
+
+    amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag1}),      % 1 - 0
+
+    slave_synced(Master, Queue),
+
+    %% We restart the slave and we send a message, so that the slave will only
+    %% have one of the messages.
+    rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+    rabbit_ct_broker_helpers:start_broker(Config, Slave),
+
+    send_dummy_message(Channel, Queue),                                 % 2 - 0
+
+    slave_unsynced(Master, Queue),
+
+    %% We reject the message that the slave doesn't have, and verify that it's
+    %% still unsynced
+    {#'basic.get_ok'{delivery_tag = Tag2}, _} =
+        amqp_channel:call(Channel, #'basic.get'{queue = Queue}),        % 1 - 1
+    slave_unsynced(Master, Queue),
+    amqp_channel:cast(Channel, #'basic.reject'{ delivery_tag = Tag2,
+                                                requeue      = true }), % 2 - 0
+    slave_unsynced(Master, Queue),
+    {#'basic.get_ok'{delivery_tag = Tag3}, _} =
+        amqp_channel:call(Channel, #'basic.get'{queue = Queue}),        % 1 - 1
+    amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag3}),      % 1 - 0
+    slave_synced(Master, Queue),
+    {#'basic.get_ok'{delivery_tag = Tag4}, _} =
+        amqp_channel:call(Channel, #'basic.get'{queue = Queue}),        % 0 - 1
+    amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag4}),      % 0 - 0
+    slave_synced(Master, Queue).
+
+%% Like slave_synchronization/1, but messages leave the queue via
+%% per-message TTL (dead-lettered to DLXQueue on a third node) instead of
+%% consumer acks; wait_for_messages/3 on the DLX queue is used to detect
+%% that the TTL'ed messages are gone.
+slave_synchronization_ttl(Config) ->
+    [Master, Slave, DLX] = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    Channel = rabbit_ct_client_helpers:open_channel(Config, Master),
+    DLXChannel = rabbit_ct_client_helpers:open_channel(Config, DLX),
+
+    %% We declare a DLX queue to wait for messages to be TTL'ed
+    DLXQueue = <<"dlx-queue">>,
+    #'queue.declare_ok'{} =
+        amqp_channel:call(Channel, #'queue.declare'{queue       = DLXQueue,
+                                                    auto_delete = false}),
+
+    TestMsgTTL = 5000,
+    Queue = <<"ha.two.test">>,
+    %% Sadly we need fairly high numbers for the TTL because starting/stopping
+    %% nodes takes a fair amount of time.
+    Args = [{<<"x-message-ttl">>, long, TestMsgTTL},
+            {<<"x-dead-letter-exchange">>, longstr, <<>>},
+            {<<"x-dead-letter-routing-key">>, longstr, DLXQueue}],
+    #'queue.declare_ok'{} =
+        amqp_channel:call(Channel, #'queue.declare'{queue       = Queue,
+                                                    auto_delete = false,
+                                                    arguments   = Args}),
+
+    slave_synced(Master, Queue),
+
+    %% Both messages unknown to the slave (it was down when they arrived)
+    rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+    send_dummy_message(Channel, Queue),
+    send_dummy_message(Channel, Queue),
+    rabbit_ct_broker_helpers:start_broker(Config, Slave),
+    slave_unsynced(Master, Queue),
+    wait_for_messages(DLXQueue, DLXChannel, 2),
+    slave_synced(Master, Queue),
+
+    %% 1 unknown, 1 known
+    rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+    send_dummy_message(Channel, Queue),
+    rabbit_ct_broker_helpers:start_broker(Config, Slave),
+    slave_unsynced(Master, Queue),
+    send_dummy_message(Channel, Queue),
+    slave_unsynced(Master, Queue),
+    wait_for_messages(DLXQueue, DLXChannel, 2),
+    slave_synced(Master, Queue),
+
+    %% Both messages known to the slave: it stays synced throughout
+    send_dummy_message(Channel, Queue),
+    send_dummy_message(Channel, Queue),
+    slave_synced(Master, Queue),
+    wait_for_messages(DLXQueue, DLXChannel, 2),
+    slave_synced(Master, Queue),
+
+    ok.
+
+%% Publishes a small payload to Queue via the default exchange.
+send_dummy_message(Channel, Queue) ->
+    Payload = <<"foo">>,
+    Publish = #'basic.publish'{exchange = <<>>, routing_key = Queue},
+    amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}).
+
+%% Returns the synchronised slave pids of Queue as reported by Node.  The
+%% info value is the atom '' (not a list) when there are none, so that
+%% case is normalised to the empty list.
+slave_pids(Node, Queue) ->
+    {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+                       [rabbit_misc:r(<<"/">>, queue, Queue)]),
+    SSP = synchronised_slave_pids,
+    [{SSP, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [SSP]]),
+    case Pids of
+        '' -> [];
+        _  -> Pids
+    end.
+
+%% The mnesia synchronization takes a while, so poll for the expected sync
+%% status rather than waiting for the testcase to fail, since the timetrap
+%% is quite high.
+wait_for_sync_status(Status, Node, Queue) ->
+    Max = 10000 / ?LOOP_RECURSION_DELAY,
+    wait_for_sync_status(0, Max, Status, Node, Queue).
+
+%% Polls every ?LOOP_RECURSION_DELAY ms (up to ~10 seconds in total) until
+%% "Queue has exactly one synchronised slave" equals Status; errors out
+%% with diagnostic details if the status never matches.
+wait_for_sync_status(N, Max, Status, Node, Queue) when N >= Max ->
+    erlang:error({sync_status_max_tries_failed,
+                  [{queue, Queue},
+                   {node, Node},
+                   {expected_status, Status},
+                   {max_tried, Max}]});
+wait_for_sync_status(N, Max, Status, Node, Queue) ->
+    Synced = length(slave_pids(Node, Queue)) =:= 1,
+    case Synced =:= Status of
+        true  -> ok;
+        false -> timer:sleep(?LOOP_RECURSION_DELAY),
+                 wait_for_sync_status(N + 1, Max, Status, Node, Queue)
+    end.
+
+%% Waits until Queue has exactly one synchronised slave.
+slave_synced(Node, Queue) ->
+    wait_for_sync_status(true, Node, Queue).
+
+%% Waits until Queue has no synchronised slave.
+slave_unsynced(Node, Queue) ->
+    wait_for_sync_status(false, Node, Queue).
+
+%% Consumes and acks exactly N messages from Queue, then cancels the
+%% consumer.  Blocks (without a timeout) until all N deliveries arrive,
+%% so the testcase timetrap is the effective upper bound.
+wait_for_messages(Queue, Channel, N) ->
+    Sub = #'basic.consume'{queue = Queue},
+    #'basic.consume_ok'{consumer_tag = CTag} = amqp_channel:call(Channel, Sub),
+    receive
+        #'basic.consume_ok'{} -> ok
+    end,
+    lists:foreach(
+      fun (_) -> receive
+                     {#'basic.deliver'{delivery_tag = Tag}, _Content} ->
+                         amqp_channel:cast(Channel,
+                                           #'basic.ack'{delivery_tag = Tag})
+                 end
+      end, lists:seq(1, N)),
+    amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = CTag}).
diff --git a/rabbitmq-server/test/unit_SUITE.erl b/rabbitmq-server/test/unit_SUITE.erl
new file mode 100644 (file)
index 0000000..ba0f43f
--- /dev/null
@@ -0,0 +1,735 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-compile(export_all).
+
+%% All tests in this suite are pure unit tests and run inside a single
+%% parallel group.
+all() ->
+    [
+      {group, parallel_tests}
+    ].
+
+%% Sub-groups collect the tests for a particular module or feature
+%% (basic header handling, resource_monitor, supervisor2, truncate,
+%% vm_memory_monitor); the supervisor2 group is sequential since its
+%% tests register a named supervisor process.
+groups() ->
+    [
+      {parallel_tests, [parallel], [
+          arguments_parser,
+          filtering_flags_parsing,
+          {basic_header_handling, [parallel], [
+              write_table_with_invalid_existing_type,
+              invalid_existing_headers,
+              disparate_invalid_header_entries_accumulate_separately,
+              corrupt_or_invalid_headers_are_overwritten,
+              invalid_same_header_entry_accumulation
+            ]},
+          content_framing,
+          content_transcoding,
+          pg_local,
+          pmerge,
+          plmerge,
+          priority_queue,
+          {resource_monitor, [parallel], [
+              parse_information_unit
+            ]},
+          {supervisor2, [], [
+              check_shutdown_stop,
+              check_shutdown_ignored
+            ]},
+          table_codec,
+          {truncate, [parallel], [
+              short_examples_exactly,
+              term_limit,
+              large_examples_for_size
+            ]},
+          unfold,
+          version_equivalance,
+          {vm_memory_monitor, [parallel], [
+              parse_line_linux
+            ]}
+        ]}
+    ].
+
+%% No per-group setup/teardown is needed for these unit tests.
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+%% -------------------------------------------------------------------
+%% Argument parsing.
+%% -------------------------------------------------------------------
+
+%% Exercises rabbit_cli:parse_arguments/4 with global and per-command
+%% flags/options: default values, overrides in either position, duplicate
+%% options, trailing arguments, and the ways an option can "eat" the
+%% following token (a command name or another flag).
+arguments_parser(_Config) ->
+    GlobalOpts1 = [{"-f1", flag}, {"-o1", {option, "foo"}}],
+    Commands1 = [command1, {command2, [{"-f2", flag}, {"-o2", {option, "bar"}}]}],
+
+    GetOptions =
+        fun (Args) ->
+                rabbit_cli:parse_arguments(Commands1, GlobalOpts1, "-n", Args)
+        end,
+
+    check_parse_arguments(no_command, GetOptions, []),
+    check_parse_arguments(no_command, GetOptions, ["foo", "bar"]),
+    check_parse_arguments(
+      {ok, {command1, [{"-f1", false}, {"-o1", "foo"}], []}},
+      GetOptions, ["command1"]),
+    check_parse_arguments(
+      {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
+      GetOptions, ["command1", "-o1", "blah"]),
+    check_parse_arguments(
+      {ok, {command1, [{"-f1", true}, {"-o1", "foo"}], []}},
+      GetOptions, ["command1", "-f1"]),
+    check_parse_arguments(
+      {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
+      GetOptions, ["-o1", "blah", "command1"]),
+    check_parse_arguments(
+      {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], ["quux"]}},
+      GetOptions, ["-o1", "blah", "command1", "quux"]),
+    check_parse_arguments(
+      {ok, {command1, [{"-f1", true}, {"-o1", "blah"}], ["quux", "baz"]}},
+      GetOptions, ["command1", "quux", "-f1", "-o1", "blah", "baz"]),
+    %% For duplicate flags, the last one counts
+    check_parse_arguments(
+      {ok, {command1, [{"-f1", false}, {"-o1", "second"}], []}},
+      GetOptions, ["-o1", "first", "command1", "-o1", "second"]),
+    %% If the flag "eats" the command, the command won't be recognised
+    check_parse_arguments(no_command, GetOptions,
+                          ["-o1", "command1", "quux"]),
+    %% If a flag eats another flag, the eaten flag won't be recognised
+    check_parse_arguments(
+      {ok, {command1, [{"-f1", false}, {"-o1", "-f1"}], []}},
+      GetOptions, ["command1", "-o1", "-f1"]),
+
+    %% Now for some command-specific flags...
+    check_parse_arguments(
+      {ok, {command2, [{"-f1", false}, {"-f2", false},
+                       {"-o1", "foo"}, {"-o2", "bar"}], []}},
+      GetOptions, ["command2"]),
+
+    check_parse_arguments(
+      {ok, {command2, [{"-f1", false}, {"-f2", true},
+                       {"-o1", "baz"}, {"-o2", "bar"}], ["quux", "foo"]}},
+      GetOptions, ["-f2", "command2", "quux", "-o1", "baz", "foo"]),
+
+    passed.
+
+%% Asserts that Fun(As) matches ExpRes, sorting each result's option list
+%% first so the comparison is insensitive to option ordering.
+check_parse_arguments(ExpRes, Fun, As) ->
+    SortRes =
+        fun (no_command)          -> no_command;
+            ({ok, {C, KVs, As1}}) -> {ok, {C, lists:sort(KVs), As1}}
+        end,
+
+    true = SortRes(ExpRes) =:= SortRes(Fun(As)).
+
+%% Table-driven check of rabbit_cli:filter_opts/2.  Each case is
+%% {explicitly provided values, known filter options, expected boolean per
+%% option}; note the last case shows that providing no values means every
+%% option defaults to true.
+filtering_flags_parsing(_Config) ->
+    Cases = [{[], [], []}
+            ,{[{"--online", true}], ["--offline", "--online", "--third-option"], [false, true, false]}
+            ,{[{"--online", true}, {"--third-option", true}, {"--offline", true}], ["--offline", "--online", "--third-option"], [true, true, true]}
+            ,{[], ["--offline", "--online", "--third-option"], [true, true, true]}
+            ],
+    lists:foreach(fun({Vals, Opts, Expect}) ->
+                          case rabbit_cli:filter_opts(Vals, Opts) of
+                              Expect ->
+                                  ok;
+                              Got ->
+                                  exit({no_match, Got, Expect, {args, Vals, Opts}})
+                          end
+                  end,
+                  Cases).
+
+%% -------------------------------------------------------------------
+%% basic_header_handling.
+%% -------------------------------------------------------------------
+
+%% Sample x-death-style and routing header tables used by the
+%% basic_header_handling tests below.
+-define(XDEATH_TABLE,
+        [{<<"reason">>,       longstr,   <<"blah">>},
+         {<<"queue">>,        longstr,   <<"foo.bar.baz">>},
+         {<<"exchange">>,     longstr,   <<"my-exchange">>},
+         {<<"routing-keys">>, array,     []}]).
+
+-define(ROUTE_TABLE, [{<<"redelivered">>, bool, <<"true">>}]).
+
+%% An invalid header entry (longstr where a table is expected) and the
+%% form it takes once moved into the x-invalid-headers table.
+-define(BAD_HEADER(K), {<<K>>, longstr, <<"bad ", K>>}).
+-define(BAD_HEADER2(K, Suf), {<<K>>, longstr, <<"bad ", K, Suf>>}).
+-define(FOUND_BAD_HEADER(K), {<<K>>, array, [{longstr, <<"bad ", K>>}]}).
+
+%% Prepending over an existing non-table value moves the bad value aside.
+write_table_with_invalid_existing_type(_Config) ->
+    prepend_check(<<"header1">>, ?XDEATH_TABLE, [?BAD_HEADER("header1")]).
+
+%% After prepending, the valid table is readable under the original key.
+invalid_existing_headers(_Config) ->
+    Headers =
+        prepend_check(<<"header2">>, ?ROUTE_TABLE, [?BAD_HEADER("header2")]),
+    {array, [{table, ?ROUTE_TABLE}]} =
+        rabbit_misc:table_lookup(Headers, <<"header2">>),
+    passed.
+
+%% Invalid values for different keys end up under separate keys inside
+%% the x-invalid-headers table.
+disparate_invalid_header_entries_accumulate_separately(_Config) ->
+    BadHeaders = [?BAD_HEADER("header2")],
+    Headers = prepend_check(<<"header2">>, ?ROUTE_TABLE, BadHeaders),
+    Headers2 = prepend_check(<<"header1">>, ?XDEATH_TABLE,
+                             [?BAD_HEADER("header1") | Headers]),
+    {table, [?FOUND_BAD_HEADER("header1"),
+             ?FOUND_BAD_HEADER("header2")]} =
+        rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
+    passed.
+
+%% A corrupt x-invalid-headers entry itself is also moved aside rather
+%% than trusted.
+corrupt_or_invalid_headers_are_overwritten(_Config) ->
+    Headers0 = [?BAD_HEADER("header1"),
+                ?BAD_HEADER("x-invalid-headers")],
+    Headers1 = prepend_check(<<"header1">>, ?XDEATH_TABLE, Headers0),
+    {table,[?FOUND_BAD_HEADER("header1"),
+            ?FOUND_BAD_HEADER("x-invalid-headers")]} =
+        rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
+    passed.
+
+%% Repeated invalid values for the same key accumulate newest-first.
+invalid_same_header_entry_accumulation(_Config) ->
+    BadHeader1 = ?BAD_HEADER2("header1", "a"),
+    Headers = prepend_check(<<"header1">>, ?ROUTE_TABLE, [BadHeader1]),
+    Headers2 = prepend_check(<<"header1">>, ?ROUTE_TABLE,
+                             [?BAD_HEADER2("header1", "b") | Headers]),
+    {table, InvalidHeaders} =
+        rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
+    {array, [{longstr,<<"bad header1b">>},
+             {longstr,<<"bad header1a">>}]} =
+        rabbit_misc:table_lookup(InvalidHeaders, <<"header1">>),
+    passed.
+
+%% Prepends HeaderTable under HeaderKey via rabbit_basic and asserts that
+%% the pre-existing (invalid) value for that key was moved to the front
+%% of its x-invalid-headers entry; returns the resulting headers.
+prepend_check(HeaderKey, HeaderTable, Headers) ->
+    Headers1 = rabbit_basic:prepend_table_header(
+                 HeaderKey, HeaderTable, Headers),
+    {table, Invalid} =
+        rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
+    {Type, Value} = rabbit_misc:table_lookup(Headers, HeaderKey),
+    {array, [{Type, Value} | _]} =
+        rabbit_misc:table_lookup(Invalid, HeaderKey),
+    Headers1.
+
+%% -------------------------------------------------------------------
+%% pg_local.
+%% -------------------------------------------------------------------
+
+%% Joins/leaves two processes across groups a and b, checking membership
+%% after every step: duplicate joins are counted separately and each
+%% leave removes one copy.  Finally kills both processes and checks that
+%% dead members are removed from all groups.
+pg_local(_Config) ->
+    [P, Q] = [spawn(fun () -> receive X -> X end end) || _ <- [x, x]],
+    check_pg_local(ok, [], []),
+    check_pg_local(pg_local:join(a, P), [P], []),
+    check_pg_local(pg_local:join(b, P), [P], [P]),
+    check_pg_local(pg_local:join(a, P), [P, P], [P]),
+    check_pg_local(pg_local:join(a, Q), [P, P, Q], [P]),
+    check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q]),
+    check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q, Q]),
+    check_pg_local(pg_local:leave(a, P), [P, Q], [P, Q, Q]),
+    check_pg_local(pg_local:leave(b, P), [P, Q], [Q, Q]),
+    check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
+    check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
+    %% Kill both members and wait for them to be fully down.
+    [begin X ! done,
+           Ref = erlang:monitor(process, X),
+           receive {'DOWN', Ref, process, X, _Info} -> ok end
+     end  || X <- [P, Q]],
+    check_pg_local(ok, [], []),
+    passed.
+
+%% Syncs pg_local, then compares each group's member list (sorted, so
+%% order-insensitively) against the expected pid lists.
+check_pg_local(ok, APids, BPids) ->
+    ok = pg_local:sync(),
+    [true, true] = [lists:sort(Pids) == lists:sort(pg_local:get_members(Key)) ||
+                       {Key, Pids} <- [{a, APids}, {b, BPids}]].
+
+%% -------------------------------------------------------------------
+%% priority_queue.
+%% -------------------------------------------------------------------
+
+%% End-to-end checks of the priority_queue module: construction, explicit
+%% priorities (including negative and infinity), joins, and ordering
+%% invariants.  Each expected tuple is {is_queue, is_empty, len, to_list,
+%% out-order} as produced by test_priority_queue/1.
+priority_queue(_Config) ->
+
+    false = priority_queue:is_queue(not_a_queue),
+
+    %% empty Q
+    Q = priority_queue:new(),
+    {true, true, 0, [], []} = test_priority_queue(Q),
+
+    %% 1-4 element no-priority Q
+    true = lists:all(fun (X) -> X =:= passed end,
+                     lists:map(fun test_simple_n_element_queue/1,
+                               lists:seq(1, 4))),
+
+    %% 1-element priority Q
+    Q1 = priority_queue:in(foo, 1, priority_queue:new()),
+    {true, false, 1, [{1, foo}], [foo]} =
+        test_priority_queue(Q1),
+
+    %% 2-element same-priority Q
+    Q2 = priority_queue:in(bar, 1, Q1),
+    {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
+        test_priority_queue(Q2),
+
+    %% 2-element different-priority Q
+    Q3 = priority_queue:in(bar, 2, Q1),
+    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+        test_priority_queue(Q3),
+
+    %% 1-element negative priority Q
+    Q4 = priority_queue:in(foo, -1, priority_queue:new()),
+    {true, false, 1, [{-1, foo}], [foo]} = test_priority_queue(Q4),
+
+    %% merge 2 * 1-element no-priority Qs
+    Q5 = priority_queue:join(priority_queue:in(foo, Q),
+                             priority_queue:in(bar, Q)),
+    {true, false, 2, [{0, foo}, {0, bar}], [foo, bar]} =
+        test_priority_queue(Q5),
+
+    %% merge 1-element no-priority Q with 1-element priority Q
+    Q6 = priority_queue:join(priority_queue:in(foo, Q),
+                             priority_queue:in(bar, 1, Q)),
+    {true, false, 2, [{1, bar}, {0, foo}], [bar, foo]} =
+        test_priority_queue(Q6),
+
+    %% merge 1-element priority Q with 1-element no-priority Q
+    Q7 = priority_queue:join(priority_queue:in(foo, 1, Q),
+                             priority_queue:in(bar, Q)),
+    {true, false, 2, [{1, foo}, {0, bar}], [foo, bar]} =
+        test_priority_queue(Q7),
+
+    %% merge 2 * 1-element same-priority Qs
+    Q8 = priority_queue:join(priority_queue:in(foo, 1, Q),
+                             priority_queue:in(bar, 1, Q)),
+    {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
+        test_priority_queue(Q8),
+
+    %% merge 2 * 1-element different-priority Qs
+    Q9 = priority_queue:join(priority_queue:in(foo, 1, Q),
+                             priority_queue:in(bar, 2, Q)),
+    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+        test_priority_queue(Q9),
+
+    %% merge 2 * 1-element different-priority Qs (other way around)
+    Q10 = priority_queue:join(priority_queue:in(bar, 2, Q),
+                              priority_queue:in(foo, 1, Q)),
+    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+        test_priority_queue(Q10),
+
+    %% merge 2 * 2-element multi-different-priority Qs
+    Q11 = priority_queue:join(Q6, Q5),
+    {true, false, 4, [{1, bar}, {0, foo}, {0, foo}, {0, bar}],
+     [bar, foo, foo, bar]} = test_priority_queue(Q11),
+
+    %% and the other way around
+    Q12 = priority_queue:join(Q5, Q6),
+    {true, false, 4, [{1, bar}, {0, foo}, {0, bar}, {0, foo}],
+     [bar, foo, bar, foo]} = test_priority_queue(Q12),
+
+    %% merge with negative priorities
+    Q13 = priority_queue:join(Q4, Q5),
+    {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
+        test_priority_queue(Q13),
+
+    %% and the other way around
+    Q14 = priority_queue:join(Q5, Q4),
+    {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
+        test_priority_queue(Q14),
+
+    %% joins with empty queues:
+    Q1 = priority_queue:join(Q, Q1),
+    Q1 = priority_queue:join(Q1, Q),
+
+    %% insert with priority into non-empty zero-priority queue
+    Q15 = priority_queue:in(baz, 1, Q5),
+    {true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} =
+        test_priority_queue(Q15),
+
+    %% 1-element infinity priority Q
+    Q16 = priority_queue:in(foo, infinity, Q),
+    {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16),
+
+    %% add infinity to 0-priority Q
+    Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)),
+    {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
+        test_priority_queue(Q17),
+
+    %% and the other way around
+    Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)),
+    {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
+        test_priority_queue(Q18),
+
+    %% add infinity to mixed-priority Q
+    Q19 = priority_queue:in(qux, infinity, Q3),
+    {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} =
+        test_priority_queue(Q19),
+
+    %% merge the above with a negative priority Q
+    Q20 = priority_queue:join(Q19, Q4),
+    {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}],
+     [qux, bar, foo, foo]} = test_priority_queue(Q20),
+
+    %% merge two infinity priority queues
+    Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q),
+                              priority_queue:in(bar, infinity, Q)),
+    {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} =
+        test_priority_queue(Q21),
+
+    %% merge two mixed priority with infinity queues
+    Q22 = priority_queue:join(Q18, Q20),
+    {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo},
+                      {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} =
+        test_priority_queue(Q22),
+
+    passed.
+
+%% Inserts every element of L into Q at the default (zero) priority.
+priority_queue_in_all(Q, L) ->
+    lists:foldl(fun (X, Acc) -> priority_queue:in(X, Acc) end, Q, L).
+
+%% Drains Q completely, returning its elements in out/1 order.
+priority_queue_out_all(Q) ->
+    case priority_queue:out(Q) of
+        {empty, _}       -> [];
+        {{value, V}, Q1} -> [V | priority_queue_out_all(Q1)]
+    end.
+
+%% Snapshot of a queue's observable state, pattern-matched against the
+%% expected tuples in priority_queue/1 above.
+test_priority_queue(Q) ->
+    {priority_queue:is_queue(Q),
+     priority_queue:is_empty(Q),
+     priority_queue:len(Q),
+     priority_queue:to_list(Q),
+     priority_queue_out_all(Q)}.
+
+%% FIFO behaviour of an N-element queue with no explicit priorities.
+test_simple_n_element_queue(N) ->
+    Items = lists:seq(1, N),
+    Q = priority_queue_in_all(priority_queue:new(), Items),
+    ToListRes = [{0, X} || X <- Items],
+    {true, false, N, ToListRes, Items} = test_priority_queue(Q),
+    passed.
+
+%% ---------------------------------------------------------------------------
+%% resource_monitor.
+%% ---------------------------------------------------------------------------
+
+%% Table-driven check of rabbit_resource_monitor_misc:parse_information_unit/1:
+%% decimal suffixes (kB/MB/GB, case-insensitive) are powers of 1000, while
+%% "iB" suffixes and bare k/K/m/M/G are powers of 1024; malformed input
+%% (embedded spaces, missing number, fractions, unknown units) yields
+%% {error, parse_error}.
+parse_information_unit(_Config) ->
+    lists:foreach(fun ({S, V}) ->
+                          V = rabbit_resource_monitor_misc:parse_information_unit(S)
+                  end,
+                  [
+                   {"1000", {ok, 1000}},
+
+                   {"10kB", {ok, 10000}},
+                   {"10MB", {ok, 10000000}},
+                   {"10GB", {ok, 10000000000}},
+
+                   {"10kiB", {ok, 10240}},
+                   {"10MiB", {ok, 10485760}},
+                   {"10GiB", {ok, 10737418240}},
+
+                   {"10k", {ok, 10240}},
+                   {"10M", {ok, 10485760}},
+                   {"10G", {ok, 10737418240}},
+
+                   {"10KB", {ok, 10000}},
+                   {"10K",  {ok, 10240}},
+                   {"10m",  {ok, 10485760}},
+                   {"10Mb", {ok, 10000000}},
+
+                   {"0MB",  {ok, 0}},
+
+                   {"10 k", {error, parse_error}},
+                   {"MB", {error, parse_error}},
+                   {"", {error, parse_error}},
+                   {"0.5GB", {error, parse_error}},
+                   {"10TB", {error, parse_error}}
+                  ]),
+    passed.
+
+%% ---------------------------------------------------------------------------
+%% supervisor2.
+%% ---------------------------------------------------------------------------
+
+%% Children that obey the stop message: many iterations, many children.
+check_shutdown_stop(_Config) ->
+    ok = check_shutdown(stop,    200, 200, 2000).
+
+%% Children that ignore the stop message: the supervisor must still come
+%% down (presumably via the SupTimeout shutdown timeout — see
+%% dummy_supervisor2), so keep iterations/children minimal.
+check_shutdown_ignored(_Config) ->
+    ok = check_shutdown(ignored,   1,   2, 2000).
+
+%% Repeatedly (Iterations times) starts ChildCount children under the
+%% registered dummy_supervisor2 test supervisor, sends every child
+%% SigStop, then terminates and restarts the intermediate 'test_sup'
+%% child, asserting the monitored supervisor always goes down with reason
+%% 'shutdown'.  Finally shuts the top-level supervisor down cleanly and
+%% returns ok or the first {error, {Iteration, Reason}} observed.
+check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) ->
+    {ok, Sup} = supervisor2:start_link(dummy_supervisor2, [SupTimeout]),
+    Res = lists:foldl(
+            fun (I, ok) ->
+                    TestSupPid = erlang:whereis(dummy_supervisor2),
+                    ChildPids =
+                        [begin
+                             {ok, ChildPid} =
+                                 supervisor2:start_child(TestSupPid, []),
+                             ChildPid
+                         end || _ <- lists:seq(1, ChildCount)],
+                    MRef = erlang:monitor(process, TestSupPid),
+                    [P ! SigStop || P <- ChildPids],
+                    ok = supervisor2:terminate_child(Sup, test_sup),
+                    {ok, _} = supervisor2:restart_child(Sup, test_sup),
+                    receive
+                        {'DOWN', MRef, process, TestSupPid, shutdown} ->
+                            ok;
+                        {'DOWN', MRef, process, TestSupPid, Reason} ->
+                            {error, {I, Reason}}
+                    end;
+                (_, R) ->
+                    %% Skip remaining iterations once an error is recorded.
+                    R
+            end, ok, lists:seq(1, Iterations)),
+    unlink(Sup),
+    MSupRef = erlang:monitor(process, Sup),
+    exit(Sup, shutdown),
+    receive
+        {'DOWN', MSupRef, process, Sup, _Reason} ->
+            ok
+    end,
+    Res.
+
+%% ---------------------------------------------------------------------------
+%% truncate.
+%% ---------------------------------------------------------------------------
+
+%% Spot-checks truncate:term/2: with a tiny size budget each example must
+%% truncate to exactly the expected shape ('...' / "..." markers), and
+%% with a huge budget the term must come back unchanged.
+short_examples_exactly(_Config) ->
+    F = fun (Term, Exp) ->
+                Exp = truncate:term(Term, {1, {10, 10, 5, 5}}),
+                Term = truncate:term(Term, {100000, {10, 10, 5, 5}})
+        end,
+    FSmall = fun (Term, Exp) ->
+                     Exp = truncate:term(Term, {1, {2, 2, 2, 2}}),
+                     Term = truncate:term(Term, {100000, {2, 2, 2, 2}})
+             end,
+    F([], []),
+    F("h", "h"),
+    F("hello world", "hello w..."),
+    F([[h,e,l,l,o,' ',w,o,r,l,d]], [[h,e,l,l,o,'...']]),
+    F([a|b], [a|b]),
+    F(<<"hello">>, <<"hello">>),
+    F([<<"hello world">>], [<<"he...">>]),
+    F(<<1:1>>, <<1:1>>),
+    F(<<1:81>>, <<0:56, "...">>),
+    F({{{{a}}},{b},c,d,e,f,g,h,i,j,k}, {{{'...'}},{b},c,d,e,f,g,h,i,j,'...'}),
+    FSmall({a,30,40,40,40,40}, {a,30,'...'}),
+    FSmall([a,30,40,40,40,40], [a,30,'...']),
+    %% Pids, funs, refs and small numbers are left untouched.
+    P = spawn(fun() -> receive die -> ok end end),
+    F([0, 0.0, <<1:1>>, F, P], [0, 0.0, <<1:1>>, F, P]),
+    P ! die,
+    R = make_ref(),
+    F([R], [R]),
+    ok.
+
+%% Checks truncate:term_size/3 budget accounting: remaining budget or
+%% 'limit_exceeded'.  The expected remainders differ between 32-bit and
+%% 64-bit emulators, hence the two accepted values per case.
+term_limit(_Config) ->
+    W = erlang:system_info(wordsize),
+    S = <<"abc">>,
+    1 = truncate:term_size(S, 4, W),
+    limit_exceeded = truncate:term_size(S, 3, W),
+    case 100 - truncate:term_size([S, S], 100, W) of
+        22 -> ok; %% 32 bit
+        38 -> ok  %% 64 bit
+    end,
+    case 100 - truncate:term_size([S, [S]], 100, W) of
+        30 -> ok; %% ditto
+        54 -> ok
+    end,
+    limit_exceeded = truncate:term_size([S, S], 6, W),
+    ok.
+
+%% Shrinking multi-megabyte terms must bring them from over 5 MB of
+%% external term size down to under 500 kB.
+large_examples_for_size(_Config) ->
+    %% Real world values
+    Shrink = fun(Term) -> truncate:term(Term, {1, {1000, 100, 50, 5}}) end,
+    TestSize = fun(Term) ->
+                       true = 5000000 < size(term_to_binary(Term)),
+                       true = 500000 > size(term_to_binary(Shrink(Term)))
+               end,
+    TestSize(lists:seq(1, 5000000)),
+    TestSize(recursive_list(1000, 10)),
+    TestSize(recursive_list(5000, 20)),
+    TestSize(gb_sets:from_list([I || I <- lists:seq(1, 1000000)])),
+    TestSize(gb_trees:from_orddict([{I, I} || I <- lists:seq(1, 1000000)])),
+    ok.
+
+%% Builds a nested list fixture: at depth 0 a flat sequence of S elements,
+%% otherwise S copies of a smaller, one-level-deeper list.
+recursive_list(S, 0) -> lists:seq(1, S);
+recursive_list(S, N) -> [recursive_list(S div N, N-1) || _ <- lists:seq(1, S)].
+
+%% ---------------------------------------------------------------------------
+%% vm_memory_monitor.
+%% ---------------------------------------------------------------------------
+
%% Checks vm_memory_monitor:parse_line_linux/1 against a sample of
%% /proc/meminfo lines: with and without a "kB" unit, with trailing
%% whitespace, and with a missing colon. Values given in kB must come
%% back converted to bytes.
parse_line_linux(_Config) ->
    Samples = [{"MemTotal:      0 kB",        {'MemTotal', 0}},
               {"MemTotal:      502968 kB  ", {'MemTotal', 515039232}},
               {"MemFree:         178232 kB", {'MemFree',  182509568}},
               {"MemTotal:         50296888", {'MemTotal', 50296888}},
               {"MemTotal         502968 kB", {'MemTotal', 515039232}},
               {"MemTotal     50296866   ",   {'MemTotal', 50296866}}],
    [{Key, Value} = vm_memory_monitor:parse_line_linux(Line)
     || {Line, {Key, Value}} <- Samples],
    ok.
+
+%% ---------------------------------------------------------------------------
+%% Unordered tests (originally from rabbit_tests.erl).
+%% ---------------------------------------------------------------------------
+
%% Test that content frames don't exceed frame-max.
content_framing(_Config) ->
    %% Cases: no body, a body easily fitting one frame, a body filling
    %% exactly one frame (an empty frame is 8 bytes), and a body that
    %% needs several frames.
    passed = test_content_framing(4096, <<>>),
    passed = test_content_framing(4096, <<"Easy">>),
    passed = test_content_framing(11, <<"One">>),
    passed = test_content_framing(11, <<"More than one frame">>),
    passed.
+
%% Builds content frames for BodyBin with the given FrameMax and checks
%% that (a) the content header frame carries the total body size and
%% (b) every body frame is well-formed and no larger than FrameMax.
test_content_framing(FrameMax, BodyBin) ->
    [Header | Frames] =
        rabbit_binary_generator:build_simple_content_frames(
          1,
          rabbit_binary_generator:ensure_content_encoded(
            rabbit_basic:build_content(#'P_basic'{}, BodyBin),
            rabbit_framing_amqp_0_9_1),
          FrameMax,
          rabbit_framing_amqp_0_9_1),
    %% header is formatted correctly and the size is the total of the
    %% fragments
    <<_FrameHeader:7/binary, _ClassAndWeight:4/binary,
      BodySize:64/unsigned, _Rest/binary>> = list_to_binary(Header),
    BodySize = size(BodyBin),
    true = lists:all(
             fun (ContentFrame) ->
                     FrameBinary = list_to_binary(ContentFrame),
                     %% assert: type+channel prefix, a 32-bit size, a
                     %% payload of exactly that size and the 16#CE
                     %% frame-end octet
                     <<_TypeAndChannel:3/binary,
                       Size:32/unsigned, _Payload:Size/binary, 16#CE>> =
                         FrameBinary,
                     size(FrameBinary) =< FrameMax
             end, Frames),
    passed.
+
%% Applies decode/encode/clear operations to content records in several
%% orders and across both protocols; the only testable guarantees are
%% that nothing crashes and that the Ensure* operations leave the
%% relevant field populated.
content_transcoding(_Config) ->
    %% there are no guarantees provided by 'clear' - it's just a hint
    ClearDecoded = fun rabbit_binary_parser:clear_decoded_content/1,
    ClearEncoded = fun rabbit_binary_generator:clear_encoded_content/1,
    EnsureDecoded =
        fun (C0) ->
                C1 = rabbit_binary_parser:ensure_content_decoded(C0),
                true = C1#content.properties =/= none,
                C1
        end,
    EnsureEncoded =
        fun (Protocol) ->
                fun (C0) ->
                        C1 = rabbit_binary_generator:ensure_content_encoded(
                               C0, Protocol),
                        true = C1#content.properties_bin =/= none,
                        C1
                end
        end,
    %% Beyond the assertions in Ensure*, the only testable guarantee
    %% is that the operations should never fail.
    %%
    %% If we were using quickcheck we'd simply stuff all the above
    %% into a generator for sequences of operations. In the absence of
    %% quickcheck we pick particularly interesting sequences that:
    %%
    %% - execute every op twice since they are idempotent
    %% - invoke clear_decoded, clear_encoded, decode and transcode
    %%   with one or both of decoded and encoded content present
    [begin
         sequence_with_content([Op]),
         sequence_with_content([ClearEncoded, Op]),
         sequence_with_content([ClearDecoded, Op])
     end || Op <- [ClearDecoded, ClearEncoded, EnsureDecoded,
                   EnsureEncoded(rabbit_framing_amqp_0_9_1),
                   EnsureEncoded(rabbit_framing_amqp_0_8)]],
    passed.
+
%% Threads an empty, already-encoded content record through every
%% operation in Sequence, applying each operation twice (they are
%% expected to be idempotent).
sequence_with_content(Sequence) ->
    Content0 = rabbit_binary_generator:ensure_content_encoded(
                 rabbit_basic:build_content(#'P_basic'{}, <<>>),
                 rabbit_framing_amqp_0_9_1),
    Twice = fun (Op, Content) -> Op(Op(Content)) end,
    lists:foldl(Twice, Content0, Sequence).
+
%% rabbit_misc:pmerge/3 must leave an already-present key untouched and
%% prepend a missing key.
pmerge(_Config) ->
    Props = [{a, 1}, {b, 2}],
    Props = rabbit_misc:pmerge(a, 3, Props),
    [{c, 3} | Props] = rabbit_misc:pmerge(c, 3, Props),
    passed.
+
%% rabbit_misc:plmerge/2: on conflicting keys the first list wins, and
%% keys unique to the second list are appended.
plmerge(_Config) ->
    First  = [{a, 1}, {b, 2}, {c, 3}],
    Second = [{a, 2}, {d, 4}],
    [{a, 1}, {b, 2}, {c, 3}, {d, 4}] = rabbit_misc:plmerge(First, Second),
    passed.
+
%% Round-trips an AMQP 0-9-1 field table through the binary generator
%% and parser, checking the generated bytes against a hand-encoded
%% reference binary covering every supported field type.
table_codec(_Config) ->
    %% FIXME this does not test inexact numbers (double and float) yet,
    %% because they won't pass the equality assertions
    Table = [{<<"longstr">>,   longstr,   <<"Here is a long string">>},
             {<<"signedint">>, signedint, 12345},
             {<<"decimal">>,   decimal,   {3, 123456}},
             {<<"timestamp">>, timestamp, 109876543209876},
             {<<"table">>,     table,     [{<<"one">>, signedint, 54321},
                                           {<<"two">>, longstr,
                                            <<"A long string">>}]},
             {<<"byte">>,      byte,      -128},
             {<<"long">>,      long,      1234567890},
             {<<"short">>,     short,     655},
             {<<"bool">>,      bool,      true},
             {<<"binary">>,    binary,    <<"a binary string">>},
             {<<"unsignedbyte">>, unsignedbyte, 250},
             {<<"unsignedshort">>, unsignedshort, 65530},
             {<<"unsignedint">>, unsignedint, 4294967290},
             {<<"void">>,      void,      undefined},
             {<<"array">>,     array,     [{signedint, 54321},
                                           {longstr, <<"A long string">>}]}
            ],
    %% Reference encoding: each entry is a length-prefixed key, a
    %% one-letter type tag, then the value in that type's wire format.
    Binary = <<
               7,"longstr",   "S", 21:32, "Here is a long string",
               9,"signedint", "I", 12345:32/signed,
               7,"decimal",   "D", 3, 123456:32,
               9,"timestamp", "T", 109876543209876:64,
               5,"table",     "F", 31:32, % length of table
               3,"one",       "I", 54321:32,
               3,"two",       "S", 13:32, "A long string",
               4,"byte",      "b", -128:8/signed,
               4,"long",      "l", 1234567890:64,
               5,"short",     "s", 655:16,
               4,"bool",      "t", 1,
               6,"binary",    "x", 15:32, "a binary string",
               12,"unsignedbyte", "B", 250:8/unsigned,
               13,"unsignedshort", "u", 65530:16/unsigned,
               11,"unsignedint", "i", 4294967290:32/unsigned,
               4,"void",      "V",
               5,"array",     "A", 23:32,
               "I", 54321:32,
               "S", 13:32, "A long string"
             >>,
    Binary = rabbit_binary_generator:generate_table(Table),
    Table  = rabbit_binary_parser:parse_table(Binary),
    passed.
+
%% rabbit_misc:unfold/2 expands a seed into a list until the generator
%% returns false, and also returns the final seed.
unfold(_Config) ->
    {[], test} = rabbit_misc:unfold(fun (_Seed) -> false end, test),
    Expected = lists:seq(2, 20, 2),
    Generator = fun (0) -> false;
                    (N) -> {true, N * 2, N - 1}
                end,
    {Expected, 0} = rabbit_misc:unfold(Generator, 10),
    passed.
+
%% Checks rabbit_misc:version_minor_equivalent/2: versions match when
%% they agree on major.minor and have the same number of components
%% (patch level is ignored).
%% NOTE(review): the testcase name is misspelled ("equivalance"); it is
%% referenced by this name from the suite's group definitions, so it is
%% deliberately left unchanged here.
version_equivalance(_Config) ->
    true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0"),
    true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.1"),
    %% Unsubstituted version placeholders compare equal to themselves.
    true = rabbit_misc:version_minor_equivalent("%%VSN%%", "%%VSN%%"),
    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.1.0"),
    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0"),
    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0.1"),
    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.foo"),
    passed.
diff --git a/rabbitmq-server/test/unit_inbroker_SUITE.erl b/rabbitmq-server/test/unit_inbroker_SUITE.erl
new file mode 100644 (file)
index 0000000..e9ecbf5
--- /dev/null
@@ -0,0 +1,3824 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc.  All rights reserved.
+%%
+
+-module(unit_inbroker_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(PERSISTENT_MSG_STORE, msg_store_persistent).
+-define(TRANSIENT_MSG_STORE,  msg_store_transient).
+
+-define(TIMEOUT_LIST_OPS_PASS, 5000).
+-define(TIMEOUT, 30000).
+
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+
+-define(VARIABLE_QUEUE_TESTCASES, [
+    variable_queue_dynamic_duration_change,
+    variable_queue_partial_segments_delta_thing,
+    variable_queue_all_the_bits_not_covered_elsewhere_A,
+    variable_queue_all_the_bits_not_covered_elsewhere_B,
+    variable_queue_drop,
+    variable_queue_fold_msg_on_disk,
+    variable_queue_dropfetchwhile,
+    variable_queue_dropwhile_varying_ram_duration,
+    variable_queue_fetchwhile_varying_ram_duration,
+    variable_queue_ack_limiting,
+    variable_queue_purge,
+    variable_queue_requeue,
+    variable_queue_requeue_ram_beta,
+    variable_queue_fold,
+    variable_queue_batch_publish,
+    variable_queue_batch_publish_delivered
+  ]).
+
+-define(BACKING_QUEUE_TESTCASES, [
+    bq_queue_index,
+    bq_queue_index_props,
+    {variable_queue_default, [], ?VARIABLE_QUEUE_TESTCASES},
+    {variable_queue_lazy, [], ?VARIABLE_QUEUE_TESTCASES ++
+                              [variable_queue_mode_change]},
+    bq_variable_queue_delete_msg_store_files_callback,
+    bq_queue_recover
+  ]).
+
+-define(CLUSTER_TESTCASES, [
+    delegates_async,
+    delegates_sync,
+    queue_cleanup,
+    declare_on_dead_queue,
+    refresh_events
+  ]).
+
%% Top-level common_test group list for this suite.
all() ->
    [
      {group, parallel_tests},
      {group, non_parallel_tests},
      {group, backing_queue_tests},
      {group, cluster_tests},

      %% These three groups get a dedicated broker setup in
      %% init_per_group/2 (see the ClusterSize selection there).
      {group, disconnect_detected_during_alarm},
      {group, list_consumers_sanity_check},
      {group, list_queues_online_and_offline}
    ].
+
%% Group/testcase tree. The per-case comments in non_parallel_tests
%% record why each test cannot run in parallel.
groups() ->
    [
      {parallel_tests, [parallel], [
          amqp_connection_refusal,
          configurable_server_properties,
          confirms,
          credit_flow_settings,
          dynamic_mirroring,
          gen_server2_with_state,
          list_operations_timeout_pass,
          mcall,
          {password_hashing, [], [
              password_hashing,
              change_password
            ]},
          {policy_validation, [parallel, {repeat, 20}], [
              ha_policy_validation,
              policy_validation,
              policy_opts_validation,
              queue_master_location_policy_validation,
              queue_modes_policy_validation,
              vhost_removed_while_updating_policy
            ]},
          runtime_parameters,
          set_disk_free_limit_command,
          topic_matching,
          user_management
        ]},
      {non_parallel_tests, [], [
          app_management, %% Restart RabbitMQ.
          channel_statistics, %% Expect specific statistics.
          disk_monitor, %% Replace rabbit_misc module.
          file_handle_cache, %% Change FHC limit.
          head_message_timestamp_statistics, %% Expect specific statistics.
          log_management, %% Check log files.
          log_management_during_startup, %% Check log files.
          memory_high_watermark, %% Trigger alarm.
          rotate_logs_without_suffix, %% Check log files.
          server_status %% Trigger alarm.
        ]},
      {backing_queue_tests, [], [
          msg_store,
          {backing_queue_embed_limit_0, [], ?BACKING_QUEUE_TESTCASES},
          {backing_queue_embed_limit_1024, [], ?BACKING_QUEUE_TESTCASES}
        ]},
      {cluster_tests, [], [
          {from_cluster_node1, [], ?CLUSTER_TESTCASES},
          {from_cluster_node2, [], ?CLUSTER_TESTCASES}
        ]},

      %% Test previously executed with the multi-node target.
      {disconnect_detected_during_alarm, [], [
          disconnect_detected_during_alarm %% Trigger alarm.
        ]},
      {list_consumers_sanity_check, [], [
          list_consumers_sanity_check
        ]},
      {list_queues_online_and_offline, [], [
          list_queues_online_and_offline %% Stop node B.
        ]}
    ].
+
%% Per-group common_test properties.
group(backing_queue_tests) ->
    %% Several tests based on lazy queues may take more than 30 minutes,
    %% so widen the timetrap for this group.
    [{timetrap, {hours, 1}}];
group(_Other) ->
    [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
%% Logs the test environment and runs the shared broker setup steps.
init_per_suite(Config) ->
    rabbit_ct_helpers:log_environment(),
    rabbit_ct_helpers:run_setup_steps(Config).
+
%% Runs the shared teardown steps registered during init_per_suite/1.
end_per_suite(Config) ->
    rabbit_ct_helpers:run_teardown_steps(Config).
+
%% Top-level groups (those listed in all/0) get their own broker
%% cluster plus client setup; nested sub-groups only run the
%% group-specific init.
init_per_group(Group, Config) ->
    case lists:member({group, Group}, all()) of
        true ->
            %% Most groups use a 2-node cluster; the alarm-disconnect
            %% and consumer sanity checks only need a single node.
            ClusterSize = case Group of
                disconnect_detected_during_alarm -> 1;
                list_consumers_sanity_check      -> 1;
                _                                -> 2
            end,
            Config1 = rabbit_ct_helpers:set_config(Config, [
                {rmq_nodename_suffix, Group},
                {rmq_nodes_count, ClusterSize}
              ]),
            rabbit_ct_helpers:run_steps(Config1,
              rabbit_ct_broker_helpers:setup_steps() ++
              rabbit_ct_client_helpers:setup_steps() ++ [
                fun(C) -> init_per_group1(Group, C) end,
                fun setup_file_handle_cache/1
              ]);
        false ->
            rabbit_ct_helpers:run_steps(Config, [
                fun(C) -> init_per_group1(Group, C) end
              ])
    end.
+
%% Group-specific initialisation: configures the broker node (via RPC)
%% or stashes per-group settings in the CT config.
init_per_group1(backing_queue_tests, Config) ->
    %% Skip unless the node runs the plain priority-queue backing
    %% queue; other modules are not supported by this group.
    Module = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, get_env, [rabbit, backing_queue_module]),
    case Module of
        {ok, rabbit_priority_queue} ->
            rabbit_ct_broker_helpers:rpc(Config, 0,
              ?MODULE, setup_backing_queue_test_group, [Config]);
        _ ->
            {skip, rabbit_misc:format(
               "Backing queue module not supported by this test group: ~p~n",
               [Module])}
    end;
init_per_group1(backing_queue_embed_limit_0, Config) ->
    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, set_env, [rabbit, queue_index_embed_msgs_below, 0]),
    Config;
init_per_group1(backing_queue_embed_limit_1024, Config) ->
    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, set_env, [rabbit, queue_index_embed_msgs_below, 1024]),
    Config;
init_per_group1(variable_queue_default, Config) ->
    rabbit_ct_helpers:set_config(Config, {variable_queue_type, default});
init_per_group1(variable_queue_lazy, Config) ->
    rabbit_ct_helpers:set_config(Config, {variable_queue_type, lazy});
init_per_group1(from_cluster_node1, Config) ->
    rabbit_ct_helpers:set_config(Config, {test_direction, {0, 1}});
init_per_group1(from_cluster_node2, Config) ->
    rabbit_ct_helpers:set_config(Config, {test_direction, {1, 0}});
init_per_group1(_, Config) ->
    Config.
+
%% Applies the file-handle-cache limit on broker node 0 via RPC.
setup_file_handle_cache(Config) ->
    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, setup_file_handle_cache1, []),
    Config.
+
setup_file_handle_cache1() ->
    %% FIXME: Why are we doing this?
    %% Pins the file handle cache to a very low limit so that
    %% limit-related code paths get exercised during the suite.
    Limit = 10,
    application:set_env(rabbit, file_handles_high_watermark, Limit),
    ok = file_handle_cache:set_limit(Limit),
    ok.
+
%% Mirror of init_per_group/2: top-level groups tear their broker
%% cluster down, nested groups only run the group-specific cleanup.
end_per_group(Group, Config) ->
    case lists:member({group, Group}, all()) of
        true ->
            rabbit_ct_helpers:run_steps(Config,
              [fun(C) -> end_per_group1(Group, C) end] ++
              rabbit_ct_client_helpers:teardown_steps() ++
              rabbit_ct_broker_helpers:teardown_steps());
        false ->
            Config
    end.
+
%% Group-specific cleanup; restores the embed limit saved by
%% setup_backing_queue_test_group/1 into the CT config.
end_per_group1(backing_queue_tests, Config) ->
    rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, teardown_backing_queue_test_group, [Config]);
end_per_group1(Group, Config)
when   Group =:= backing_queue_embed_limit_0
orelse Group =:= backing_queue_embed_limit_1024 ->
    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
      application, set_env, [rabbit, queue_index_embed_msgs_below,
        ?config(rmq_queue_index_embed_msgs_below, Config)]),
    Config;
end_per_group1(_, Config) ->
    Config.
+
%% Standard per-testcase bookkeeping (start timestamp etc.).
init_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
%% Standard per-testcase bookkeeping counterpart.
end_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Application management.
+%% -------------------------------------------------------------------
+
%% Runs app_management1/1 on broker node 0 (the test must execute
%% inside the broker).
app_management(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, app_management1, [Config]).
+
%% Exercises rabbitmqctl-style commands around an app stop/start
%% cycle; stop_app and start_app are each issued twice to check they
%% are idempotent.
app_management1(_Config) ->
    control_action(wait, [os:getenv("RABBITMQ_PID_FILE")]),
    %% Starting, stopping and diagnostics.  Note that we don't try
    %% 'report' when the rabbit app is stopped and that we enable
    %% tracing for the duration of this function.
    ok = control_action(trace_on, []),
    ok = control_action(stop_app, []),
    ok = control_action(stop_app, []),
    ok = control_action(status, []),
    ok = control_action(cluster_status, []),
    ok = control_action(environment, []),
    ok = control_action(start_app, []),
    ok = control_action(start_app, []),
    ok = control_action(status, []),
    ok = control_action(report, []),
    ok = control_action(cluster_status, []),
    ok = control_action(environment, []),
    ok = control_action(trace_off, []),
    passed.
+
+%% -------------------------------------------------------------------
+%% Message store.
+%% -------------------------------------------------------------------
+
%% Runs msg_store1/1 on broker node 0 (the message store only exists
%% inside the broker).
msg_store(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, msg_store1, [Config]).
+
%% End-to-end message store test: write/read/remove, confirm handling,
%% cache and reference-count paths, selective recovery across a
%% restart, and garbage collection under bulk removal. The sequence is
%% order-sensitive; steps are annotated inline.
msg_store1(_Config) ->
    restart_msg_store_empty(),
    MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)],
    {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(length(MsgIds) div 2, MsgIds),
    Ref = rabbit_guid:gen(),
    {Cap, MSCState} = msg_store_client_init_capture(
                        ?PERSISTENT_MSG_STORE, Ref),
    Ref2 = rabbit_guid:gen(),
    {Cap2, MSC2State} = msg_store_client_init_capture(
                          ?PERSISTENT_MSG_STORE, Ref2),
    %% check we don't contain any of the msgs we're about to publish
    false = msg_store_contains(false, MsgIds, MSCState),
    %% test confirm logic
    passed = test_msg_store_confirms([hd(MsgIds)], Cap, MSCState),
    %% check we don't contain any of the msgs we're about to publish
    false = msg_store_contains(false, MsgIds, MSCState),
    %% publish the first half
    ok = msg_store_write(MsgIds1stHalf, MSCState),
    %% sync on the first half
    ok = on_disk_await(Cap, MsgIds1stHalf),
    %% publish the second half
    ok = msg_store_write(MsgIds2ndHalf, MSCState),
    %% check they're all in there
    true = msg_store_contains(true, MsgIds, MSCState),
    %% publish the latter half twice so we hit the caching and ref
    %% count code. We need to do this through a 2nd client since a
    %% single client is not supposed to write the same message more
    %% than once without first removing it.
    ok = msg_store_write(MsgIds2ndHalf, MSC2State),
    %% check they're still all in there
    true = msg_store_contains(true, MsgIds, MSCState),
    %% sync on the 2nd half
    ok = on_disk_await(Cap2, MsgIds2ndHalf),
    %% cleanup
    ok = on_disk_stop(Cap2),
    ok = rabbit_msg_store:client_delete_and_terminate(MSC2State),
    ok = on_disk_stop(Cap),
    %% read them all
    MSCState1 = msg_store_read(MsgIds, MSCState),
    %% read them all again - this will hit the cache, not disk
    MSCState2 = msg_store_read(MsgIds, MSCState1),
    %% remove them all
    ok = msg_store_remove(MsgIds, MSCState2),
    %% check first half doesn't exist
    false = msg_store_contains(false, MsgIds1stHalf, MSCState2),
    %% check second half does exist
    true = msg_store_contains(true, MsgIds2ndHalf, MSCState2),
    %% read the second half again
    MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2),
    %% read the second half again, just for fun (aka code coverage)
    MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3),
    ok = rabbit_msg_store:client_terminate(MSCState4),
    %% stop and restart, preserving every other msg in 2nd half
    ok = rabbit_variable_queue:stop_msg_store(),
    ok = rabbit_variable_queue:start_msg_store(
           [], {fun ([]) -> finished;
                    ([MsgId|MsgIdsTail])
                      when length(MsgIdsTail) rem 2 == 0 ->
                        {MsgId, 1, MsgIdsTail};
                    ([MsgId|MsgIdsTail]) ->
                        {MsgId, 0, MsgIdsTail}
                end, MsgIds2ndHalf}),
    MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
    %% check we have the right msgs left
    lists:foldl(
      fun (MsgId, Bool) ->
              not(Bool = rabbit_msg_store:contains(MsgId, MSCState5))
      end, false, MsgIds2ndHalf),
    ok = rabbit_msg_store:client_terminate(MSCState5),
    %% restart empty
    restart_msg_store_empty(),
    MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
    %% check we don't contain any of the msgs
    false = msg_store_contains(false, MsgIds, MSCState6),
    %% publish the first half again
    ok = msg_store_write(MsgIds1stHalf, MSCState6),
    %% this should force some sort of sync internally otherwise misread
    ok = rabbit_msg_store:client_terminate(
           msg_store_read(MsgIds1stHalf, MSCState6)),
    MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
    ok = msg_store_remove(MsgIds1stHalf, MSCState7),
    ok = rabbit_msg_store:client_terminate(MSCState7),
    %% restart empty
    restart_msg_store_empty(), %% now safe to reuse msg_ids
    %% push a lot of msgs in... at least 100 files worth
    {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit),
    PayloadSizeBits = 65536,
    BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)),
    MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)],
    Payload = << 0:PayloadSizeBits >>,
    ok = with_msg_store_client(
           ?PERSISTENT_MSG_STORE, Ref,
           fun (MSCStateM) ->
                   [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) ||
                       MsgId <- MsgIdsBig],
                   MSCStateM
           end),
    %% now read them to ensure we hit the fast client-side reading
    ok = foreach_with_msg_store_client(
           ?PERSISTENT_MSG_STORE, Ref,
           fun (MsgId, MSCStateM) ->
                   {{ok, Payload}, MSCStateN} = rabbit_msg_store:read(
                                                  MsgId, MSCStateM),
                   MSCStateN
           end, MsgIdsBig),
    %% .., then 3s by 1...
    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
                          [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]),
    %% .., then remove 3s by 2, from the young end first. This hits
    %% GC (under 50% good data left, but no empty files. Must GC).
    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
                          [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]),
    %% .., then remove 3s by 3, from the young end first. This hits
    %% GC...
    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
                          [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]),
    %% ensure empty
    ok = with_msg_store_client(
           ?PERSISTENT_MSG_STORE, Ref,
           fun (MSCStateM) ->
                   false = msg_store_contains(false, MsgIdsBig, MSCStateM),
                   MSCStateM
           end),
    %%
    passed = test_msg_store_client_delete_and_terminate(),
    %% restart empty
    restart_msg_store_empty(),
    passed.
+
%% Restarts the message store with no recovery data, i.e. empty.
restart_msg_store_empty() ->
    ok = rabbit_variable_queue:stop_msg_store(),
    RefCountFun = {fun (ok) -> finished end, ok},
    ok = rabbit_variable_queue:start_msg_store(undefined, RefCountFun).
+
%% Derives a deterministic 16-byte message id from an arbitrary term.
msg_id_bin(Seed) ->
    erlang:md5(erlang:term_to_binary(Seed)).
+
%% Idle state of the on-disk confirm capture process: waits for a test
%% process to register interest in a set of message ids, or for the
%% stop signal.
on_disk_capture() ->
    receive
        stop                 -> done;
        {await, MsgIds, Pid} -> on_disk_capture([], MsgIds, Pid)
    end.
+
%% Collects {on_disk, ...} confirms until the awaited set is empty,
%% then notifies the waiting test process. Any confirm that was not
%% awaited accumulates in the first argument and is reported as
%% 'surplus'.
on_disk_capture([_|_], _Awaiting, Pid) ->
    Pid ! {self(), surplus};
on_disk_capture(OnDisk, Awaiting, Pid) ->
    receive
        {on_disk, MsgIdsS} ->
            MsgIds = gb_sets:to_list(MsgIdsS),
            on_disk_capture(OnDisk ++ (MsgIds -- Awaiting), Awaiting -- MsgIds,
                            Pid);
        stop ->
            done
    after (case Awaiting of [] -> 200; _ -> ?TIMEOUT end) ->
            %% Quiesced: nothing awaited means success (return to the
            %% idle state); otherwise the confirms never arrived.
            case Awaiting of
                [] -> Pid ! {self(), arrived}, on_disk_capture();
                _  -> Pid ! {self(), timeout}
            end
    end.
+
%% Asks the capture process to wait for confirms of MsgIds; returns ok
%% on success or the capture process' error atom (surplus/timeout).
on_disk_await(CapturePid, MsgIds) when is_list(MsgIds) ->
    CapturePid ! {await, MsgIds, self()},
    receive
        {CapturePid, arrived} -> ok;
        {CapturePid, Error}   -> Error
    end.
+
%% Stops the capture process and waits (via monitor) for it to exit.
on_disk_stop(CapturePid) ->
    Monitor = erlang:monitor(process, CapturePid),
    CapturePid ! stop,
    receive
        {'DOWN', Monitor, process, CapturePid, _Reason} -> ok
    end.
+
%% Initialises a message store client whose on-disk callback forwards
%% confirmed ids to a freshly spawned capture process; returns both.
msg_store_client_init_capture(MsgStore, Ref) ->
    CapturePid = spawn(fun on_disk_capture/0),
    OnDisk = fun (MsgIds, _ActionTaken) ->
                     CapturePid ! {on_disk, MsgIds}
             end,
    {CapturePid,
     rabbit_msg_store:client_init(MsgStore, Ref, OnDisk, undefined)}.
+
%% Asserts that contains/2 returns Expected (true or false) for every
%% id in MsgIds; the guard makes the fold crash on the first mismatch.
msg_store_contains(Expected, MsgIds, MSCState) ->
    Check = fun (MsgId, Acc) when Acc =:= Expected ->
                    rabbit_msg_store:contains(MsgId, MSCState)
            end,
    Expected = lists:foldl(Check, Expected, MsgIds).
+
%% Reads each id (whose payload is the id itself, see msg_store_write)
%% and threads the client state through; returns the final state.
msg_store_read(MsgIds, MSCState) ->
    lists:foldl(
      fun (MsgId, StateIn) ->
              {{ok, MsgId}, StateOut} = rabbit_msg_store:read(MsgId, StateIn),
              StateOut
      end, MSCState, MsgIds).
+
%% Writes every MsgId with itself as the payload; each write must
%% return ok (enforced by the fun clause's accumulator pattern).
msg_store_write(MsgIds, MSCState) ->
    Write = fun (MsgId, ok) ->
                    rabbit_msg_store:write(MsgId, MsgId, MSCState)
            end,
    ok = lists:foldl(Write, ok, MsgIds).
+
%% Like msg_store_write/2 but uses the credit-flow-aware write path.
msg_store_write_flow(MsgIds, MSCState) ->
    Write = fun (MsgId, ok) ->
                    rabbit_msg_store:write_flow(MsgId, MsgId, MSCState)
            end,
    ok = lists:foldl(Write, ok, MsgIds).
+
%% Removes MsgIds from the store through the given client state.
msg_store_remove(MsgIds, MSCState) ->
    rabbit_msg_store:remove(MsgIds, MSCState).
+
%% Removes MsgIds using a short-lived client for the given store/ref.
msg_store_remove(MsgStore, Ref, MsgIds) ->
    Remove = fun (MSCStateM) ->
                     ok = msg_store_remove(MsgIds, MSCStateM),
                     MSCStateM
             end,
    with_msg_store_client(MsgStore, Ref, Remove).
+
%% Runs Fun with a fresh message store client and terminates the
%% client state Fun returns.
with_msg_store_client(MsgStore, Ref, Fun) ->
    MSCState = msg_store_client_init(MsgStore, Ref),
    rabbit_msg_store:client_terminate(Fun(MSCState)).
+
%% Folds Fun(MsgId, ClientState) over L with a fresh message store
%% client, threading the client state through, then terminates the
%% final state. Fun must return the (possibly updated) client state.
foreach_with_msg_store_client(MsgStore, Ref, Fun, L) ->
    %% Fun already has exactly the (MsgId, MSCState) fold signature, so
    %% pass it to foldl directly instead of wrapping it in an identical
    %% eta-redundant lambda as before.
    rabbit_msg_store:client_terminate(
      lists:foldl(Fun, msg_store_client_init(MsgStore, Ref), L)).
+
%% Checks which write/remove interleavings produce on-disk confirms;
%% the expected confirm set for each sequence is noted inline.
test_msg_store_confirms(MsgIds, Cap, MSCState) ->
    %% write -> confirmed
    ok = msg_store_write(MsgIds, MSCState),
    ok = on_disk_await(Cap, MsgIds),
    %% remove -> _
    ok = msg_store_remove(MsgIds, MSCState),
    ok = on_disk_await(Cap, []),
    %% write, remove -> confirmed
    ok = msg_store_write(MsgIds, MSCState),
    ok = msg_store_remove(MsgIds, MSCState),
    ok = on_disk_await(Cap, MsgIds),
    %% write, remove, write -> confirmed, confirmed
    ok = msg_store_write(MsgIds, MSCState),
    ok = msg_store_remove(MsgIds, MSCState),
    ok = msg_store_write(MsgIds, MSCState),
    ok = on_disk_await(Cap, MsgIds ++ MsgIds),
    %% remove, write -> confirmed
    ok = msg_store_remove(MsgIds, MSCState),
    ok = msg_store_write(MsgIds, MSCState),
    ok = on_disk_await(Cap, MsgIds),
    %% remove, write, remove -> confirmed
    ok = msg_store_remove(MsgIds, MSCState),
    ok = msg_store_write(MsgIds, MSCState),
    ok = msg_store_remove(MsgIds, MSCState),
    ok = on_disk_await(Cap, MsgIds),
    %% confirmation on timer-based sync
    passed = test_msg_store_confirm_timer(),
    passed.
+
%% Checks that a written message is eventually confirmed by the
%% timer-based sync: a dedicated client watches for MsgId's confirm
%% while msg_store_keep_busy_until_confirm/3 generates store traffic.
test_msg_store_confirm_timer() ->
    Ref = rabbit_guid:gen(),
    MsgId  = msg_id_bin(1),
    Self = self(),
    MSCState = rabbit_msg_store:client_init(
                 ?PERSISTENT_MSG_STORE, Ref,
                 fun (MsgIds, _ActionTaken) ->
                         %% Only the watched id triggers the signal.
                         case gb_sets:is_member(MsgId, MsgIds) of
                             true  -> Self ! on_disk;
                             false -> ok
                         end
                 end, undefined),
    ok = msg_store_write([MsgId], MSCState),
    ok = msg_store_keep_busy_until_confirm([msg_id_bin(2)], MSCState, false),
    ok = msg_store_remove([MsgId], MSCState),
    ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
    passed.
+
%% Repeatedly writes and removes MsgIds (respecting credit flow) until
%% the 'on_disk' confirm for the watched message arrives. While credit
%% flow reports us blocked we only wait for bump-credit messages.
%% NOTE(review): relies on the ?MAX_WAIT macro, which is not defined in
%% the visible header of this module -- confirm it is defined elsewhere
%% in the file.
msg_store_keep_busy_until_confirm(MsgIds, MSCState, Blocked) ->
    After = case Blocked of
                false -> 0;
                true  -> ?MAX_WAIT
            end,
    Recurse = fun () -> msg_store_keep_busy_until_confirm(
                          MsgIds, MSCState, credit_flow:blocked()) end,
    receive
        on_disk            -> ok;
        {bump_credit, Msg} -> credit_flow:handle_bump_msg(Msg),
                              Recurse()
    after After ->
            ok = msg_store_write_flow(MsgIds, MSCState),
            ok = msg_store_remove(MsgIds, MSCState),
            Recurse()
    end.
+
%% Writes a batch of messages and then deletes the client, which
%% exercises the 'dying client' fast path for in-flight writes.
test_msg_store_client_delete_and_terminate() ->
    restart_msg_store_empty(),
    Ref = rabbit_guid:gen(),
    MsgIds = [msg_id_bin(N) || N <- lists:seq(1, 10)],
    MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
    ok = msg_store_write(MsgIds, MSCState),
    ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
    passed.
+
+%% -------------------------------------------------------------------
+%% Backing queue.
+%% -------------------------------------------------------------------
+
%% Saves the queue-index settings that the backing-queue groups modify
%% so end_per_group1/2 and teardown_backing_queue_test_group/1 can
%% restore them later.
%% NOTE(review): msg_store_file_size_limit is set to 512 and then
%% immediately restored to its original value with no message store
%% restart in between, so the 512 setting appears to have no effect --
%% confirm against the pre-common_test version of this code.
setup_backing_queue_test_group(Config) ->
    {ok, FileSizeLimit} =
        application:get_env(rabbit, msg_store_file_size_limit),
    application:set_env(rabbit, msg_store_file_size_limit, 512),
    {ok, MaxJournal} =
        application:get_env(rabbit, queue_index_max_journal_entries),
    application:set_env(rabbit, queue_index_max_journal_entries, 128),
    application:set_env(rabbit, msg_store_file_size_limit,
                        FileSizeLimit),
    {ok, Bytes} =
        application:get_env(rabbit, queue_index_embed_msgs_below),
    rabbit_ct_helpers:set_config(Config, [
        {rmq_queue_index_max_journal_entries, MaxJournal},
        {rmq_queue_index_embed_msgs_below, Bytes}
      ]).
+
+%% Restore the journal-entries limit saved by the setup function and restart
+%% the rabbit application to get a clean supervision tree.
+teardown_backing_queue_test_group(Config) ->
+    %% FIXME: Undo all the setup function did.
+    application:set_env(rabbit, queue_index_max_journal_entries,
+                        ?config(rmq_queue_index_max_journal_entries, Config)),
+    %% We will have restarted the message store, and thus changed
+    %% the order of the children of rabbit_sup. This will cause
+    %% problems if there are subsequent failures - see bug 24262.
+    ok = restart_app(),
+    Config.
+
+%% CT entry point: run bq_queue_index1/1 on broker node 0 via RPC.
+bq_queue_index(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, bq_queue_index1, [Config]).
+
+%% Exercise rabbit_queue_index publish/deliver/ack/read/bounds across
+%% segment boundaries and restarts, including the auto-deletion of segment
+%% files and the journal/segment combination paths.
+bq_queue_index1(_Config) ->
+    SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+    TwoSegs = SegmentSize + SegmentSize,
+    MostOfASegment = trunc(SegmentSize*0.75),
+    SeqIdsA = lists:seq(0, MostOfASegment-1),
+    SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment),
+    SeqIdsC = lists:seq(0, trunc(SegmentSize/2)),
+    SeqIdsD = lists:seq(0, SegmentSize*4),
+
+    %% Transient publishes vanish on restart; persistent ones survive until
+    %% acked, after which bounds collapse back to {0, 0}.
+    with_empty_test_queue(
+      fun (Qi0) ->
+              {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0),
+              {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1),
+              {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2),
+              {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3),
+              ok = verify_read_with_published(false, false, ReadA,
+                                              lists:reverse(SeqIdsMsgIdsA)),
+              %% should get length back as 0, as all the msgs were transient
+              {0, 0, Qi6} = restart_test_queue(Qi4),
+              {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6),
+              {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7),
+              {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8),
+              {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9),
+              ok = verify_read_with_published(false, true, ReadB,
+                                              lists:reverse(SeqIdsMsgIdsB)),
+              %% should get length back as MostOfASegment
+              LenB = length(SeqIdsB),
+              BytesB = LenB * 10,
+              {LenB, BytesB, Qi12} = restart_test_queue(Qi10),
+              {0, TwoSegs, Qi13} = rabbit_queue_index:bounds(Qi12),
+              Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13),
+              {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14),
+              ok = verify_read_with_published(true, true, ReadC,
+                                              lists:reverse(SeqIdsMsgIdsB)),
+              Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15),
+              Qi17 = rabbit_queue_index:flush(Qi16),
+              %% Everything will have gone now because #pubs == #acks
+              {0, 0, Qi18} = rabbit_queue_index:bounds(Qi17),
+              %% should get length back as 0 because all persistent
+              %% msgs have been acked
+              {0, 0, Qi19} = restart_test_queue(Qi18),
+              Qi19
+      end),
+
+    %% These next bits are just to hit the auto deletion of segment files.
+    %% First, partials:
+    %% a) partial pub+del+ack, then move to new segment
+    with_empty_test_queue(
+      fun (Qi0) ->
+              {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC,
+                                                          false, Qi0),
+              Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
+              Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2),
+              Qi4 = rabbit_queue_index:flush(Qi3),
+              {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize],
+                                                           false, Qi4),
+              Qi5
+      end),
+
+    %% b) partial pub+del, then move to new segment, then ack all in old segment
+    with_empty_test_queue(
+      fun (Qi0) ->
+              {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC,
+                                                           false, Qi0),
+              Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
+              {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize],
+                                                           false, Qi2),
+              Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3),
+              rabbit_queue_index:flush(Qi4)
+      end),
+
+    %% c) just fill up several segments of all pubs, then +dels, then +acks
+    with_empty_test_queue(
+      fun (Qi0) ->
+              {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD,
+                                                          false, Qi0),
+              Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1),
+              Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2),
+              rabbit_queue_index:flush(Qi3)
+      end),
+
+    %% d) get messages in all states to a segment, then flush, then do
+    %% the same again, don't flush and read. This will hit all
+    %% possibilities in combining the segment with the journal.
+    with_empty_test_queue(
+      fun (Qi0) ->
+              {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7],
+                                                               false, Qi0),
+              Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
+              Qi3 = rabbit_queue_index:ack([0], Qi2),
+              Qi4 = rabbit_queue_index:flush(Qi3),
+              {Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4),
+              Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
+              Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
+              {[], Qi8} = rabbit_queue_index:read(0, 4, Qi7),
+              {ReadD, Qi9} = rabbit_queue_index:read(4, 7, Qi8),
+              ok = verify_read_with_published(true, false, ReadD,
+                                              [Four, Five, Six]),
+              {ReadE, Qi10} = rabbit_queue_index:read(7, 9, Qi9),
+              ok = verify_read_with_published(false, false, ReadE,
+                                              [Seven, Eight]),
+              Qi10
+      end),
+
+    %% e) as for (d), but use terminate instead of read, which will
+    %% exercise journal_minus_segment, not segment_plus_journal.
+    with_empty_test_queue(
+      fun (Qi0) ->
+              {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7],
+                                                          true, Qi0),
+              Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
+              Qi3 = rabbit_queue_index:ack([0], Qi2),
+              {5, 50, Qi4} = restart_test_queue(Qi3),
+              {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4),
+              Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
+              Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
+              {5, 50, Qi8} = restart_test_queue(Qi7),
+              Qi8
+      end),
+
+    %% restart the variable queue to leave a clean slate for later tests
+    ok = rabbit_variable_queue:stop(),
+    {ok, _} = rabbit_variable_queue:start([]),
+
+    passed.
+
+%% CT entry point: run bq_queue_index_props1/1 on broker node 0 via RPC.
+bq_queue_index_props(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, bq_queue_index_props1, [Config]).
+
+%% Check that message properties (here: expiry and size) round-trip through
+%% a queue-index publish and read.
+bq_queue_index_props1(_Config) ->
+    with_empty_test_queue(
+      fun(Qi0) ->
+              MsgId = rabbit_guid:gen(),
+              Props = #message_properties{expiry=12345, size = 10},
+              Qi1 = rabbit_queue_index:publish(
+                      MsgId, 1, Props, true, infinity, Qi0),
+              {[{MsgId, 1, Props, _, _}], Qi2} =
+                  rabbit_queue_index:read(1, 2, Qi1),
+              Qi2
+      end),
+
+    ok = rabbit_variable_queue:stop(),
+    {ok, _} = rabbit_variable_queue:start([]),
+
+    passed.
+
+%% CT entry point: run the callback test on broker node 0 via RPC.
+bq_variable_queue_delete_msg_store_files_callback(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, bq_variable_queue_delete_msg_store_files_callback1, [Config]).
+
+%% Fill a durable queue with large payloads, page it to disk, then purge it,
+%% exercising the msg-store file-deletion (close_fds) callback path.
+bq_variable_queue_delete_msg_store_files_callback1(Config) ->
+    ok = restart_msg_store_empty(),
+    {new, #amqqueue { pid = QPid, name = QName } = Q} =
+      rabbit_amqqueue:declare(
+        queue_name(Config,
+          <<"bq_variable_queue_delete_msg_store_files_callback-q">>),
+        true, false, [], none),
+    Payload = <<0:8388608>>, %% 1MB (8388608 bits)
+    Count = 30,
+    publish_and_confirm(Q, Payload, Count),
+
+    %% force everything to be paged out to disk
+    rabbit_amqqueue:set_ram_duration_target(QPid, 0),
+
+    {ok, Limiter} = rabbit_limiter:start_link(no_id),
+
+    CountMinusOne = Count - 1,
+    {ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}} =
+        rabbit_amqqueue:basic_get(Q, self(), true, Limiter),
+    {ok, CountMinusOne} = rabbit_amqqueue:purge(Q),
+
+    %% give the queue a second to receive the close_fds callback msg
+    timer:sleep(1000),
+
+    rabbit_amqqueue:delete(Q, false, false),
+    passed.
+
+%% CT entry point: run bq_queue_recover1/1 on broker node 0 via RPC.
+bq_queue_recover(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, bq_queue_recover1, [Config]).
+
+%% Kill a durable queue (and its supervisor) after confirmed publishes, then
+%% verify the full message count is recovered on restart and that the
+%% recovered contents are fetchable both via the queue process and directly
+%% through the variable queue.
+bq_queue_recover1(Config) ->
+    Count = 2 * rabbit_queue_index:next_segment_boundary(0),
+    {new, #amqqueue { pid = QPid, name = QName } = Q} =
+        rabbit_amqqueue:declare(queue_name(Config, <<"bq_queue_recover-q">>),
+                                true, false, [], none),
+    publish_and_confirm(Q, <<>>, Count),
+
+    %% kill the supervisor first so the queue is not restarted automatically
+    SupPid = rabbit_ct_broker_helpers:get_queue_sup_pid(QPid),
+    true = is_pid(SupPid),
+    exit(SupPid, kill),
+    exit(QPid, kill),
+    MRef = erlang:monitor(process, QPid),
+    receive {'DOWN', MRef, process, QPid, _Info} -> ok
+    after 10000 -> exit(timeout_waiting_for_queue_death)
+    end,
+    rabbit_amqqueue:stop(),
+    rabbit_amqqueue:start(rabbit_amqqueue:recover()),
+    {ok, Limiter} = rabbit_limiter:start_link(no_id),
+    rabbit_amqqueue:with_or_die(
+      QName,
+      fun (Q1 = #amqqueue { pid = QPid1 }) ->
+              CountMinusOne = Count - 1,
+              {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} =
+                  rabbit_amqqueue:basic_get(Q1, self(), false, Limiter),
+              exit(QPid1, shutdown),
+              VQ1 = variable_queue_init(Q, true),
+              {{_Msg1, true, _AckTag1}, VQ2} =
+                  rabbit_variable_queue:fetch(true, VQ1),
+              CountMinusOne = rabbit_variable_queue:len(VQ2),
+              _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2),
+              ok = rabbit_amqqueue:internal_delete(QName)
+      end),
+    passed.
+
+%% CT entry point: run the duration-change test on broker node 0 via RPC.
+variable_queue_dynamic_duration_change(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_dynamic_duration_change1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_dynamic_duration_change1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_dynamic_duration_change2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Publish a couple of segments' worth, then repeatedly churn the queue
+%% while stepping the ram duration target through squeeze (0) and relax
+%% (infinity) values; finally drain and verify the queue is empty.
+variable_queue_dynamic_duration_change2(VQ0) ->
+    SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+
+    %% start by sending in a couple of segments worth
+    Len = 2*SegmentSize,
+    VQ1 = variable_queue_publish(false, Len, VQ0),
+    %% squeeze and relax queue
+    Churn = Len div 32,
+    VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
+
+    {Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
+    VQ7 = lists:foldl(
+            fun (Duration1, VQ4) ->
+                    {_Duration, VQ5} = rabbit_variable_queue:ram_duration(VQ4),
+                    VQ6 = variable_queue_set_ram_duration_target(
+                            Duration1, VQ5),
+                    publish_fetch_and_ack(Churn, Len, VQ6)
+            end, VQ3, [Duration / 4, 0, Duration / 4, infinity]),
+
+    %% drain
+    {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7),
+    {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags, VQ8),
+    {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
+
+    VQ10.
+
+%% CT entry point: run the partial-segments/delta test on node 0 via RPC.
+variable_queue_partial_segments_delta_thing(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_partial_segments_delta_thing1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_partial_segments_delta_thing1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_partial_segments_delta_thing2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Publish one and a half segments of persistent messages, squeeze the queue
+%% to disk, and assert the expected distribution of messages across q1 / q3 /
+%% delta at each step, then drain and ack everything.
+variable_queue_partial_segments_delta_thing2(VQ0) ->
+    SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+    HalfSegment = SegmentSize div 2,
+    OneAndAHalfSegment = SegmentSize + HalfSegment,
+    VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0),
+    {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1),
+    VQ3 = check_variable_queue_status(
+            variable_queue_set_ram_duration_target(0, VQ2),
+            %% one segment in q3, and half a segment in delta
+            [{delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
+             {q3, SegmentSize},
+             {len, SegmentSize + HalfSegment}]),
+    VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
+    VQ5 = check_variable_queue_status(
+            variable_queue_publish(true, 1, VQ4),
+            %% one alpha, but it's in the same segment as the deltas
+            [{q1, 1},
+             {delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
+             {q3, SegmentSize},
+             {len, SegmentSize + HalfSegment + 1}]),
+    {VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false,
+                                          SegmentSize + HalfSegment + 1, VQ5),
+    VQ7 = check_variable_queue_status(
+            VQ6,
+            %% the half segment should now be in q3
+            [{q1, 1},
+             {delta, {delta, undefined, 0, undefined}},
+             {q3, HalfSegment},
+             {len, HalfSegment + 1}]),
+    {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false,
+                                           HalfSegment + 1, VQ7),
+    {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8),
+    %% should be empty now
+    {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
+    VQ10.
+
+%% CT entry point: run part A of the catch-all test on node 0 via RPC.
+variable_queue_all_the_bits_not_covered_elsewhere_A(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_A1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_all_the_bits_not_covered_elsewhere_A1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_all_the_bits_not_covered_elsewhere_A2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Mix persistent and transient publishes, page everything out, terminate,
+%% re-init the queue and verify the persistent messages survived.
+variable_queue_all_the_bits_not_covered_elsewhere_A2(VQ0) ->
+    Count = 2 * rabbit_queue_index:next_segment_boundary(0),
+    VQ1 = variable_queue_publish(true, Count, VQ0),
+    VQ2 = variable_queue_publish(false, Count, VQ1),
+    VQ3 = variable_queue_set_ram_duration_target(0, VQ2),
+    {VQ4, _AckTags}  = variable_queue_fetch(Count, true, false,
+                                            Count + Count, VQ3),
+    {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false,
+                                            Count, VQ4),
+    _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
+    VQ7 = variable_queue_init(test_amqqueue(true), true),
+    {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7),
+    Count1 = rabbit_variable_queue:len(VQ8),
+    VQ9 = variable_queue_publish(false, 1, VQ8),
+    VQ10 = variable_queue_set_ram_duration_target(0, VQ9),
+    {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10),
+    {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11),
+    VQ12.
+
+%% CT entry point: run part B of the catch-all test on node 0 via RPC.
+variable_queue_all_the_bits_not_covered_elsewhere_B(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_B1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_all_the_bits_not_covered_elsewhere_B1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_all_the_bits_not_covered_elsewhere_B2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Requeue transient messages with the queue squeezed to disk, terminate,
+%% and verify that nothing survives a re-init (the messages were transient).
+variable_queue_all_the_bits_not_covered_elsewhere_B2(VQ0) ->
+    VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+    VQ2 = variable_queue_publish(false, 4, VQ1),
+    {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2),
+    {_Guids, VQ4} =
+        rabbit_variable_queue:requeue(AckTags, VQ3),
+    VQ5 = rabbit_variable_queue:timeout(VQ4),
+    _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
+    VQ7 = variable_queue_init(test_amqqueue(true), true),
+    {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7),
+    VQ8.
+
+%% CT entry point: run variable_queue_drop1/1 on broker node 0 via RPC.
+variable_queue_drop(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_drop1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_drop1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_drop2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Exercise rabbit_variable_queue:drop/2 both with and without ack required,
+%% including requeueing a dropped-but-unacked message.
+variable_queue_drop2(VQ0) ->
+    %% start by sending a message
+    VQ1 = variable_queue_publish(false, 1, VQ0),
+    %% drop message with AckRequired = true
+    {{MsgId, AckTag}, VQ2} = rabbit_variable_queue:drop(true, VQ1),
+    true = rabbit_variable_queue:is_empty(VQ2),
+    %% BUG FIX: this previously compared against the misspelt atom
+    %% 'undefinded', which made the assertion vacuous (it passed even when
+    %% AckTag was 'undefined'). Compare against 'undefined'.
+    true = AckTag =/= undefined,
+    %% drop again -> empty
+    {empty, VQ3} = rabbit_variable_queue:drop(false, VQ2),
+    %% requeue
+    {[MsgId], VQ4} = rabbit_variable_queue:requeue([AckTag], VQ3),
+    %% drop message with AckRequired = false
+    {{MsgId, undefined}, VQ5} = rabbit_variable_queue:drop(false, VQ4),
+    true = rabbit_variable_queue:is_empty(VQ5),
+    VQ5.
+
+%% CT entry point: run the ackfold-on-disk test on node 0 via RPC.
+variable_queue_fold_msg_on_disk(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_fold_msg_on_disk1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_fold_msg_on_disk1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_fold_msg_on_disk2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Fold over pending acks (ackfold) for a persistent message that has been
+%% fetched, checking the fold visits it without error.
+variable_queue_fold_msg_on_disk2(VQ0) ->
+    VQ1 = variable_queue_publish(true, 1, VQ0),
+    {VQ2, AckTags} = variable_queue_fetch(1, true, false, 1, VQ1),
+    {ok, VQ3} = rabbit_variable_queue:ackfold(fun (_M, _A, ok) -> ok end,
+                                              ok, VQ2, AckTags),
+    VQ3.
+
+%% CT entry point: run the dropwhile/fetchwhile test on node 0 via RPC.
+variable_queue_dropfetchwhile(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_dropfetchwhile1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_dropfetchwhile1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_dropfetchwhile2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Publish messages with sequential expiry properties, then fetchwhile the
+%% first half, requeue them, dropwhile them, and fetch the remainder one by
+%% one, verifying ordering and the returned stop-property at each stage.
+variable_queue_dropfetchwhile2(VQ0) ->
+    Count = 10,
+
+    %% add messages with sequential expiry
+    VQ1 = variable_queue_publish(
+            false, 1, Count,
+            fun (N, Props) -> Props#message_properties{expiry = N} end,
+            fun erlang:term_to_binary/1, VQ0),
+
+    %% fetch the first 5 messages
+    {#message_properties{expiry = 6}, {Msgs, AckTags}, VQ2} =
+        rabbit_variable_queue:fetchwhile(
+          fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end,
+          fun (Msg, AckTag, {MsgAcc, AckAcc}) ->
+                  {[Msg | MsgAcc], [AckTag | AckAcc]}
+          end, {[], []}, VQ1),
+    true = lists:seq(1, 5) == [msg2int(M) || M <- lists:reverse(Msgs)],
+
+    %% requeue them
+    {_MsgIds, VQ3} = rabbit_variable_queue:requeue(AckTags, VQ2),
+
+    %% drop the first 5 messages
+    {#message_properties{expiry = 6}, VQ4} =
+        rabbit_variable_queue:dropwhile(
+          fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ3),
+
+    %% fetch 5
+    VQ5 = lists:foldl(fun (N, VQN) ->
+                              {{Msg, _, _}, VQM} =
+                                  rabbit_variable_queue:fetch(false, VQN),
+                              true = msg2int(Msg) == N,
+                              VQM
+                      end, VQ4, lists:seq(6, Count)),
+
+    %% should be empty now
+    true = rabbit_variable_queue:is_empty(VQ5),
+
+    VQ5.
+
+%% CT entry point: run the dropwhile/ram-duration test on node 0 via RPC.
+variable_queue_dropwhile_varying_ram_duration(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_dropwhile_varying_ram_duration1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_dropwhile_varying_ram_duration1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_dropwhile_varying_ram_duration2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Run the shared varying-ram-duration scenario using a dropwhile whose
+%% predicate never matches (so nothing is actually dropped).
+variable_queue_dropwhile_varying_ram_duration2(VQ0) ->
+    test_dropfetchwhile_varying_ram_duration(
+      fun (VQ1) ->
+              {_, VQ2} = rabbit_variable_queue:dropwhile(
+                           fun (_) -> false end, VQ1),
+              VQ2
+      end, VQ0).
+
+%% CT entry point: run the fetchwhile/ram-duration test on node 0 via RPC.
+variable_queue_fetchwhile_varying_ram_duration(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_fetchwhile_varying_ram_duration1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_fetchwhile_varying_ram_duration1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_fetchwhile_varying_ram_duration2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Run the shared varying-ram-duration scenario using a fetchwhile whose
+%% predicate never matches (so nothing is actually fetched).
+variable_queue_fetchwhile_varying_ram_duration2(VQ0) ->
+    test_dropfetchwhile_varying_ram_duration(
+      fun (VQ1) ->
+              {_, ok, VQ2} = rabbit_variable_queue:fetchwhile(
+                               fun (_) -> false end,
+                               fun (_, _, A) -> A end,
+                               ok, VQ1),
+              VQ2
+      end, VQ0).
+
+%% Shared scenario: apply Fun (a dropwhile/fetchwhile wrapper) once with the
+%% queue squeezed to disk (target 0) and once fully in RAM (infinity).
+test_dropfetchwhile_varying_ram_duration(Fun, VQ0) ->
+    VQ1 = variable_queue_publish(false, 1, VQ0),
+    VQ2 = variable_queue_set_ram_duration_target(0, VQ1),
+    VQ3 = Fun(VQ2),
+    VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
+    VQ5 = variable_queue_publish(false, 1, VQ4),
+    VQ6 = Fun(VQ5),
+    VQ6.
+
+%% CT entry point: run the ack-limiting test on broker node 0 via RPC.
+variable_queue_ack_limiting(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_ack_limiting1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_ack_limiting1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_ack_limiting2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Verify RAM-residency counters: after fetching half the messages all of
+%% them (ready and unacked) are in RAM, and setting a 0 ram duration target
+%% pushes both ready messages and pending acks out to disk.
+variable_queue_ack_limiting2(VQ0) ->
+    %% start by sending in a bunch of messages
+    Len = 1024,
+    VQ1 = variable_queue_publish(false, Len, VQ0),
+
+    %% squeeze and relax queue
+    Churn = Len div 32,
+    VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
+
+    %% update stats for duration
+    {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
+
+    %% fetch half the messages
+    {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3),
+
+    VQ5 = check_variable_queue_status(
+            VQ4, [{len,                         Len div 2},
+                  {messages_unacknowledged_ram, Len div 2},
+                  {messages_ready_ram,          Len div 2},
+                  {messages_ram,                Len}]),
+
+    %% ensure all acks go to disk on 0 duration target
+    VQ6 = check_variable_queue_status(
+            variable_queue_set_ram_duration_target(0, VQ5),
+            [{len,                         Len div 2},
+             {target_ram_count,            0},
+             {messages_unacknowledged_ram, 0},
+             {messages_ready_ram,          0},
+             {messages_ram,                0}]),
+
+    VQ6.
+
+%% CT entry point: run variable_queue_purge1/1 on broker node 0 via RPC.
+variable_queue_purge(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_purge1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_purge1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_purge2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Check purge/1 and purge_acks/1 via the queue's len (ready messages) and
+%% depth (ready + unacked) after fetching, purging and requeueing.
+variable_queue_purge2(VQ0) ->
+    LenDepth = fun (VQ) ->
+                       {rabbit_variable_queue:len(VQ),
+                        rabbit_variable_queue:depth(VQ)}
+               end,
+    VQ1         = variable_queue_publish(false, 10, VQ0),
+    {VQ2, Acks} = variable_queue_fetch(6, false, false, 10, VQ1),
+    %% purge removes the 4 ready messages; the 6 unacked remain in depth
+    {4, VQ3}    = rabbit_variable_queue:purge(VQ2),
+    {0, 6}      = LenDepth(VQ3),
+    {_, VQ4}    = rabbit_variable_queue:requeue(lists:sublist(Acks, 2), VQ3),
+    {2, 6}      = LenDepth(VQ4),
+    %% purge_acks drops the remaining pending acks, leaving only ready msgs
+    VQ5         = rabbit_variable_queue:purge_acks(VQ4),
+    {2, 2}      = LenDepth(VQ5),
+    VQ5.
+
+%% CT entry point: run variable_queue_requeue1/1 on broker node 0 via RPC.
+variable_queue_requeue(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_requeue1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_requeue1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_requeue2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Build a queue with requeued "holes", then fetch everything back and check
+%% each message's value and its is_delivered (requeued) flag in order.
+variable_queue_requeue2(VQ0) ->
+    {_PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
+        variable_queue_with_holes(VQ0),
+    %% requeued messages come out first, flagged delivered=true
+    Msgs =
+        lists:zip(RequeuedMsgs,
+                  lists:duplicate(length(RequeuedMsgs), true)) ++
+        lists:zip(FreshMsgs,
+                  lists:duplicate(length(FreshMsgs), false)),
+    VQ2 = lists:foldl(fun ({I, Requeued}, VQa) ->
+                              {{M, MRequeued, _}, VQb} =
+                                  rabbit_variable_queue:fetch(true, VQa),
+                              Requeued = MRequeued, %% assertion
+                              I = msg2int(M),       %% assertion
+                              VQb
+                      end, VQ1, Msgs),
+    {empty, VQ3} = rabbit_variable_queue:fetch(true, VQ2),
+    VQ3.
+
+%% requeue from ram_pending_ack into q3, move to delta and then empty queue
+%% CT entry point: run the body on broker node 0 via RPC.
+variable_queue_requeue_ram_beta(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_requeue_ram_beta1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_requeue_ram_beta1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_requeue_ram_beta2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Fetch more than two segments' worth, requeue parts of the ack list both
+%% before and after squeezing the queue to disk, then drain and ack all.
+variable_queue_requeue_ram_beta2(VQ0) ->
+    Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
+    VQ1 = variable_queue_publish(false, Count, VQ0),
+    {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1),
+    {Back, Front} = lists:split(Count div 2, AcksR),
+    {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2),
+    VQ4 = variable_queue_set_ram_duration_target(0, VQ3),
+    {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4),
+    VQ6 = requeue_one_by_one(Front, VQ5),
+    {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6),
+    {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7),
+    VQ8.
+
+%% CT entry point: run variable_queue_fold1/1 on broker node 0 via RPC.
+variable_queue_fold(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_fold1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_fold1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_fold2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Fold over a queue containing pending, requeued and fresh messages with a
+%% range of stop points (Cut), including edge values 0 and beyond the depth.
+variable_queue_fold2(VQ0) ->
+    {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
+        variable_queue_with_holes(VQ0),
+    Count = rabbit_variable_queue:depth(VQ1),
+    Msgs = lists:sort(PendingMsgs ++ RequeuedMsgs ++ FreshMsgs),
+    lists:foldl(fun (Cut, VQ2) ->
+                        test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ2)
+                end, VQ1, [0, 1, 2, Count div 2,
+                           Count - 1, Count, Count + 1, Count * 2]).
+
+%% Fold the queue, accumulating message ints up to Cut (stopping after), and
+%% assert the accumulated list equals the sorted prefix =< Cut. Also asserts
+%% the fold's Pending flag matches membership in PendingMsgs.
+test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) ->
+    {Acc, VQ1} = rabbit_variable_queue:fold(
+                   fun (M, _, Pending, A) ->
+                           MInt = msg2int(M),
+                           Pending = lists:member(MInt, PendingMsgs), %% assert
+                           case MInt =< Cut of
+                               true  -> {cont, [MInt | A]};
+                               false -> {stop, A}
+                           end
+                   end, [], VQ0),
+    Expected = lists:takewhile(fun (I) -> I =< Cut end, Msgs),
+    Expected = lists:reverse(Acc), %% assertion
+    VQ1.
+
+%% CT entry point: run the batch-publish test on broker node 0 via RPC.
+variable_queue_batch_publish(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_batch_publish1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_batch_publish1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_batch_publish2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Batch-publish Count messages and check they are all counted as ready.
+variable_queue_batch_publish2(VQ) ->
+    Count = 10,
+    VQ1 = variable_queue_batch_publish(true, Count, VQ),
+    Count = rabbit_variable_queue:len(VQ1),
+    VQ1.
+
+%% CT entry point: run the batch-publish-delivered test on node 0 via RPC.
+variable_queue_batch_publish_delivered(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_batch_publish_delivered1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_batch_publish_delivered1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_batch_publish_delivered2/1,
+      ?config(variable_queue_type, Config)).
+
+%% Batch-publish Count messages as already-delivered; they count towards
+%% depth (ready + unacked) rather than len.
+variable_queue_batch_publish_delivered2(VQ) ->
+    Count = 10,
+    VQ1 = variable_queue_batch_publish_delivered(true, Count, VQ),
+    Count = rabbit_variable_queue:depth(VQ1),
+    VQ1.
+
+%% same as test_variable_queue_requeue_ram_beta but randomly changing
+%% the queue mode after every step.
+%% CT entry point: run the body on broker node 0 via RPC.
+variable_queue_mode_change(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, variable_queue_mode_change1, [Config]).
+
+%% Run the test body against a fresh variable queue of the configured type.
+variable_queue_mode_change1(Config) ->
+    with_fresh_variable_queue(
+      fun variable_queue_mode_change2/1,
+      ?config(variable_queue_type, Config)).
+
+%% The requeue_ram_beta scenario, interleaving a random lazy/default queue
+%% mode switch between every step to fuzz the mode-change machinery.
+variable_queue_mode_change2(VQ0) ->
+    Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
+    VQ1 = variable_queue_publish(false, Count, VQ0),
+    VQ2 = maybe_switch_queue_mode(VQ1),
+    {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+    VQ4 = maybe_switch_queue_mode(VQ3),
+    {Back, Front} = lists:split(Count div 2, AcksR),
+    {_, VQ5} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ4),
+    VQ6 = maybe_switch_queue_mode(VQ5),
+    VQ7 = variable_queue_set_ram_duration_target(0, VQ6),
+    VQ8 = maybe_switch_queue_mode(VQ7),
+    {_, VQ9} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ8),
+    VQ10 = maybe_switch_queue_mode(VQ9),
+    VQ11 = requeue_one_by_one(Front, VQ10),
+    VQ12 = maybe_switch_queue_mode(VQ11),
+    {VQ13, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ12),
+    VQ14 = maybe_switch_queue_mode(VQ13),
+    {_, VQ15} = rabbit_variable_queue:ack(AcksAll, VQ14),
+    VQ16 = maybe_switch_queue_mode(VQ15),
+    VQ16.
+
+%% Flip the queue to a randomly chosen mode (lazy or default); used to fuzz
+%% mode changes between test steps.
+maybe_switch_queue_mode(VQ) ->
+    set_queue_mode(random_queue_mode(), VQ).
+
+%% Pick 'lazy' or 'default' with equal probability.
+random_queue_mode() ->
+    case rand_compat:uniform(2) of
+        1 -> lazy;
+        2 -> default
+    end.
+
+%% Normalise a publish result: some publish variants return {Extra, VQState}
+%% while others return the queue state directly.
+pub_res(Res) ->
+    case Res of
+        {_Extra, VQS} -> VQS;
+        VQS           -> VQS
+    end.
+
+%% Build a {Msg, Props, IsDelivered=false} publish triple for the batch
+%% publish helpers; persistence is encoded as AMQP delivery_mode 2 vs 1.
+make_publish(IsPersistent, PayloadFun, PropFun, N) ->
+    DeliveryMode = case IsPersistent of
+                       true  -> 2;
+                       false -> 1
+                   end,
+    Msg = rabbit_basic:message(
+            rabbit_misc:r(<<>>, exchange, <<>>),
+            <<>>, #'P_basic'{delivery_mode = DeliveryMode},
+            PayloadFun(N)),
+    {Msg, PropFun(N, #message_properties{size = 10}), false}.
+
+%% Like make_publish/4 but for publish_delivered: returns a {Msg, Props}
+%% pair (no is_delivered flag).
+make_publish_delivered(IsPersistent, PayloadFun, PropFun, N) ->
+    DeliveryMode = case IsPersistent of
+                       true  -> 2;
+                       false -> 1
+                   end,
+    Msg = rabbit_basic:message(
+            rabbit_misc:r(<<>>, exchange, <<>>),
+            <<>>, #'P_basic'{delivery_mode = DeliveryMode},
+            PayloadFun(N)),
+    {Msg, PropFun(N, #message_properties{size = 10})}.
+
+%% Build a queue resource name scoped to the current testcase.
+queue_name(Config, Name) ->
+    Name1 = rabbit_ct_helpers:config_to_testcase_name(Config, Name),
+    queue_name(Name1).
+
+%% Wrap a binary name as a queue resource in the default ("/") vhost.
+queue_name(Name) ->
+    rabbit_misc:r(<<"/">>, queue, Name).
+
+%% The fixed queue resource used by the queue-index tests.
+test_queue() ->
+    queue_name(<<"test">>).
+
+%% Recover the test queue's index, using a throwaway persistent msg-store
+%% client for the contains-check, and return the recovery result
+%% (count, bytes and the queue-index state).
+init_test_queue() ->
+    TestQueue = test_queue(),
+    PRef = rabbit_guid:gen(),
+    PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef),
+    Res = rabbit_queue_index:recover(
+            TestQueue, [], false,
+            fun (MsgId) ->
+                    rabbit_msg_store:contains(MsgId, PersistentClient)
+            end,
+            fun nop/1, fun nop/1),
+    ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient),
+    Res.
+
+%% Cleanly terminate the index, restart the variable-queue subsystem with
+%% only the test queue registered, and recover the index again.
+restart_test_queue(Qi) ->
+    _ = rabbit_queue_index:terminate([], Qi),
+    ok = rabbit_variable_queue:stop(),
+    {ok, _} = rabbit_variable_queue:start([test_queue()]),
+    init_test_queue().
+
+%% Restart the variable-queue subsystem with no recoverable queues and
+%% wipe the test queue's index.  The {0, 0, _} match asserts the queue
+%% really is empty after the restart.
+empty_test_queue() ->
+    ok = rabbit_variable_queue:stop(),
+    {ok, _} = rabbit_variable_queue:start([]),
+    {0, 0, Qi} = init_test_queue(),
+    _ = rabbit_queue_index:delete_and_terminate(Qi),
+    ok.
+
+%% Run Fun with a freshly emptied, recovered queue index, then delete
+%% the index Fun returns.  Fun must return the (possibly updated) Qi.
+with_empty_test_queue(Fun) ->
+    ok = empty_test_queue(),
+    {0, 0, Qi} = init_test_queue(),
+    rabbit_queue_index:delete_and_terminate(Fun(Qi)).
+
+%% Bounce the rabbit application; returns whatever rabbit:start/0
+%% returns.  Stop/start results are deliberately not pattern-matched.
+restart_app() ->
+    rabbit:stop(),
+    rabbit:start().
+
+%% Publish one freshly generated message per sequence id into both the
+%% queue index and the appropriate (persistent or transient) message
+%% store.  The message id doubles as the message body.
+%% Returns {Qi, [{SeqId, MsgId}]} with the pairs in reverse publish
+%% order (the fold conses onto the accumulator).
+queue_index_publish(SeqIds, Persistent, Qi) ->
+    Ref = rabbit_guid:gen(),
+    MsgStore = case Persistent of
+                   true  -> ?PERSISTENT_MSG_STORE;
+                   false -> ?TRANSIENT_MSG_STORE
+               end,
+    MSCState = msg_store_client_init(MsgStore, Ref),
+    %% B is bound to the full accumulator while LastMsgIdWritten peels
+    %% off the most recently written message id for the flush check.
+    {A, B = [{_SeqId, LastMsgIdWritten} | _]} =
+        lists:foldl(
+          fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) ->
+                  MsgId = rabbit_guid:gen(),
+                  QiM = rabbit_queue_index:publish(
+                          MsgId, SeqId, #message_properties{size = 10},
+                          Persistent, infinity, QiN),
+                  ok = rabbit_msg_store:write(MsgId, MsgId, MSCState),
+                  {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]}
+          end, {Qi, []}, SeqIds),
+    %% do this just to force all of the publishes through to the msg_store:
+    true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState),
+    ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+    {A, B}.
+
+%% Check that every entry read back from the queue index matches the
+%% corresponding published {SeqId, MsgId} pair and carries the expected
+%% Delivered/Persistent flags.  Returns ok on success; returns the
+%% sentinel atom 'ko' on the first mismatch (callers pattern-match on
+%% ok, so 'ko' fails the test at the call site).
+verify_read_with_published(_Delivered, _Persistent, [], _) ->
+    ok;
+verify_read_with_published(Delivered, Persistent,
+                           [{MsgId, SeqId, _Props, Persistent, Delivered}|Read],
+                           [{SeqId, MsgId}|Published]) ->
+    verify_read_with_published(Delivered, Persistent, Read, Published);
+verify_read_with_published(_Delivered, _Persistent, _Read, _Published) ->
+    ko.
+
+%% No-op callbacks (arity 1 and 2) passed to queue-index and
+%% variable-queue init functions when a real callback is not needed.
+nop(_) -> ok.
+nop(_, _) -> ok.
+
+%% Open a message-store client with no flow-control callback and no
+%% credit-disciplined sender (both undefined).
+msg_store_client_init(MsgStore, Ref) ->
+    rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined).
+
+%% Initialise a variable queue for Q.  Recover = true simulates
+%% recovery after a non-clean shutdown; false creates a brand-new
+%% queue.  All callbacks are no-ops.
+variable_queue_init(Q, Recover) ->
+    RecoveryTerms = case Recover of
+                        true  -> non_clean_shutdown;
+                        false -> new
+                    end,
+    rabbit_variable_queue:init(Q, RecoveryTerms, fun nop/2, fun nop/2,
+                               fun nop/1, fun nop/1).
+
+%% Deliver Count persistent (delivery_mode = 2) messages with the given
+%% Payload to queue Q, each marked confirm = true, then block until a
+%% confirm has been received for every sequence number.
+publish_and_confirm(Q, Payload, Count) ->
+    Seqs = lists:seq(1, Count),
+    [begin
+         Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>),
+                                    <<>>, #'P_basic'{delivery_mode = 2},
+                                    Payload),
+         Delivery = #delivery{mandatory = false, sender = self(),
+                              confirm = true, message = Msg, msg_seq_no = Seq,
+                              flow = noflow},
+         _QPids = rabbit_amqqueue:deliver([Q], Delivery)
+     end || Seq <- Seqs],
+    wait_for_confirms(gb_sets:from_list(Seqs)).
+
+%% Drain '$gen_cast' confirm messages from the process mailbox until
+%% the Unconfirmed gb_set is empty; exits after ?TIMEOUT with no
+%% progress.  Confirms may arrive batched and in any order.
+wait_for_confirms(Unconfirmed) ->
+    case gb_sets:is_empty(Unconfirmed) of
+        true  -> ok;
+        false -> receive {'$gen_cast', {confirm, Confirmed, _}} ->
+                         wait_for_confirms(
+                           rabbit_misc:gb_sets_difference(
+                             Unconfirmed, gb_sets:from_list(Confirmed)))
+                 after ?TIMEOUT -> exit(timeout_waiting_for_confirm)
+                 end
+    end.
+
+%% Run Fun against a brand-new, verified-empty variable queue set to
+%% Mode, then delete the queue.  Returns 'passed' or re-raises any
+%% error from the worker process in the caller.
+with_fresh_variable_queue(Fun, Mode) ->
+    Ref = make_ref(),
+    Me = self(),
+    %% Run in a separate process since rabbit_msg_store will send
+    %% bump_credit messages and we want to ignore them
+    spawn_link(fun() ->
+                       ok = empty_test_queue(),
+                       VQ = variable_queue_init(test_amqqueue(true), false),
+                       S0 = variable_queue_status(VQ),
+                       %% a fresh queue must report all internal queues
+                       %% empty and an empty delta
+                       assert_props(S0, [{q1, 0}, {q2, 0},
+                                         {delta,
+                                          {delta, undefined, 0, undefined}},
+                                         {q3, 0}, {q4, 0},
+                                         {len, 0}]),
+                       VQ1 = set_queue_mode(Mode, VQ),
+                       try
+                           _ = rabbit_variable_queue:delete_and_terminate(
+                                 shutdown, Fun(VQ1)),
+                           Me ! Ref
+                       catch
+                           %% NOTE(review): erlang:get_stacktrace/0 is
+                           %% removed in OTP 21+; fine for the OTP this
+                           %% release targets.
+                           Type:Error ->
+                               Me ! {Ref, Type, Error, erlang:get_stacktrace()}
+                       end
+               end),
+    receive
+        Ref                    -> ok;
+        {Ref, Type, Error, ST} -> exit({Type, Error, ST})
+    end,
+    passed.
+
+%% Switch the variable queue to Mode and assert the status actually
+%% reports the new mode before returning the updated state.
+set_queue_mode(Mode, VQ) ->
+    VQ1 = rabbit_variable_queue:set_queue_mode(Mode, VQ),
+    S1 = variable_queue_status(VQ1),
+    assert_props(S1, [{mode, Mode}]),
+    VQ1.
+
+%% Publish Count messages one at a time into the variable queue.
+%% Arity 3 and 4 are convenience wrappers: default property function is
+%% the identity, default payload is <<>>, numbering starts at 1.
+variable_queue_publish(IsPersistent, Count, VQ) ->
+    variable_queue_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_publish(IsPersistent, Count, PropFun, VQ) ->
+    variable_queue_publish(IsPersistent, 1, Count, PropFun,
+                           fun (_N) -> <<>> end, VQ).
+
+%% Full form: publish messages numbered Start..Start+Count-1, with
+%% per-message payload/properties, then drain any credit-flow traffic
+%% so the queue has finished internal shuffling before returning.
+variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+    variable_queue_wait_for_shuffling_end(
+      lists:foldl(
+        fun (N, VQN) ->
+                rabbit_variable_queue:publish(
+                  rabbit_basic:message(
+                    rabbit_misc:r(<<>>, exchange, <<>>),
+                    <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+                                                         true  -> 2;
+                                                         false -> 1
+                                                     end},
+                    PayloadFun(N)),
+                  PropFun(N, #message_properties{size = 10}),
+                  false, self(), noflow, VQN)
+        end, VQ, lists:seq(Start, Start + Count - 1))).
+
+%% Batch-publish Count messages in a single call to
+%% rabbit_variable_queue:batch_publish/4.  Arity 3 and 4 supply the
+%% identity property function / empty payload defaults.
+variable_queue_batch_publish(IsPersistent, Count, VQ) ->
+    variable_queue_batch_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_batch_publish(IsPersistent, Count, PropFun, VQ) ->
+    variable_queue_batch_publish(IsPersistent, 1, Count, PropFun,
+                                 fun (_N) -> <<>> end, VQ).
+
+variable_queue_batch_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+    variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
+                                  PayloadFun, fun make_publish/4,
+                                  fun rabbit_variable_queue:batch_publish/4,
+                                  VQ).
+
+%% Same as variable_queue_batch_publish/3,4,6 but for messages that are
+%% considered already delivered (batch_publish_delivered/4).
+variable_queue_batch_publish_delivered(IsPersistent, Count, VQ) ->
+    variable_queue_batch_publish_delivered(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Count, PropFun, VQ) ->
+    variable_queue_batch_publish_delivered(IsPersistent, 1, Count, PropFun,
+                                           fun (_N) -> <<>> end, VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+    variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
+                                  PayloadFun, fun make_publish_delivered/4,
+                                  fun rabbit_variable_queue:batch_publish_delivered/4,
+                                  VQ).
+
+%% Shared worker for the two batch-publish flavours: build Count
+%% publish tuples (numbered from Start) via MakePubFun and hand them to
+%% PubFun in one call, then wait for internal shuffling to finish.
+variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun, PayloadFun,
+                              MakePubFun, PubFun, VQ) ->
+    Ns = lists:seq(Start, Start + Count - 1),
+    Publishes = [MakePubFun(IsPersistent, PayloadFun, PropFun, N) || N <- Ns],
+    variable_queue_wait_for_shuffling_end(
+      pub_res(PubFun(Publishes, self(), noflow, VQ))).
+
+%% Fetch Count messages, asserting on each one the expected persistence
+%% and delivered flags, and that the queue length drops to Len - N
+%% after the N-th fetch.  Returns {VQ, AckTags} with the ack tags in
+%% reverse fetch order.
+variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) ->
+    lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
+                        Rem = Len - N,
+                        {{#basic_message { is_persistent = IsPersistent },
+                          IsDelivered, AckTagN}, VQM} =
+                            rabbit_variable_queue:fetch(true, VQN),
+                        Rem = rabbit_variable_queue:len(VQM),
+                        {VQM, [AckTagN | AckTagsAcc]}
+                end, {VQ, []}, lists:seq(1, Count)).
+
+%% An #amqqueue{} record for the test queue, owned by the calling
+%% process, with the requested durability.
+test_amqqueue(Durable) ->
+    (rabbit_amqqueue:pseudo_queue(test_queue(), self()))
+        #amqqueue { durable = Durable }.
+
+%% Assert that Prop in proplist List equals Value; returns ok on
+%% success.  On mismatch (or a missing property) we exit/1 rather than
+%% return an error tuple: assert_props/2 ignores return values, so a
+%% returned tuple would silently swallow the failure.  The exit reason
+%% keeps the original {exit, Prop, exp, Value, List} shape.
+assert_prop(List, Prop, Value) ->
+    case proplists:get_value(Prop, List) of
+        Value -> ok;
+        _     -> exit({exit, Prop, exp, Value, List})
+    end.
+
+%% Assert every {Prop, Value} pair against List via assert_prop/3; the
+%% comprehension's result list is only meaningful to callers that
+%% pattern-match it.
+assert_props(List, PropVals) ->
+    [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals].
+
+%% Set the RAM duration target (0 forces paging to disk, infinity keeps
+%% everything in RAM) and wait for the resulting internal shuffling.
+variable_queue_set_ram_duration_target(Duration, VQ) ->
+    variable_queue_wait_for_shuffling_end(
+      rabbit_variable_queue:set_ram_duration_target(Duration, VQ)).
+
+%% Run N publish/fetch/ack cycles of a single transient message,
+%% asserting the queue length stays at Len throughout (each cycle is
+%% net-neutral).
+publish_fetch_and_ack(0, _Len, VQ0) ->
+    VQ0;
+publish_fetch_and_ack(N, Len, VQ0) ->
+    VQ1 = variable_queue_publish(false, 1, VQ0),
+    {{_Msg, false, AckTag}, VQ2} = rabbit_variable_queue:fetch(true, VQ1),
+    Len = rabbit_variable_queue:len(VQ2),
+    {_Guids, VQ3} = rabbit_variable_queue:ack([AckTag], VQ2),
+    publish_fetch_and_ack(N-1, Len, VQ3).
+
+%% Flattened status proplist for the queue: the standard info items
+%% plus the backing_queue_status proplist spliced in at the same level.
+variable_queue_status(VQ) ->
+    Keys = rabbit_backing_queue:info_keys() -- [backing_queue_status],
+    [{K, rabbit_variable_queue:info(K, VQ)} || K <- Keys] ++
+        rabbit_variable_queue:info(backing_queue_status, VQ).
+
+%% While credit_flow reports this process blocked, consume bump_credit
+%% messages and resume the queue so pending internal work completes;
+%% returns once the process is unblocked.
+variable_queue_wait_for_shuffling_end(VQ) ->
+    case credit_flow:blocked() of
+        false -> VQ;
+        true  -> receive
+                     {bump_credit, Msg} ->
+                         credit_flow:handle_bump_msg(Msg),
+                         variable_queue_wait_for_shuffling_end(
+                           rabbit_variable_queue:resume(VQ))
+                 end
+    end.
+
+%% Decode a test message payload back to the term it was created from
+%% (payloads here are term_to_binary-encoded integers); the content
+%% record stores payload fragments in reverse order.
+msg2int(#basic_message{content = #content{ payload_fragments_rev = P}}) ->
+    binary_to_term(list_to_binary(lists:reverse(P))).
+
+%% Keep every {Ack, N} pair whose sequence number N, offset by Rem,
+%% falls on a multiple of Interval.
+ack_subset(AckSeqs, Interval, Rem) ->
+    [Pair || {_Ack, N} = Pair <- AckSeqs, (N + Rem) rem Interval == 0].
+
+%% Requeue the given ack tags one per call (rather than in a single
+%% batch), exercising the per-message requeue path.
+requeue_one_by_one(Acks, VQ) ->
+    lists:foldl(fun (AckTag, VQN) ->
+                        {_MsgId, VQM} = rabbit_variable_queue:requeue(
+                                          [AckTag], VQN),
+                        VQM
+                end, VQ, Acks).
+
+%% Create a vq with messages in q1, delta, and q3, and holes (in the
+%% form of pending acks) in the latter two.
+%% Returns {RemainingPendingAckSeqs, RequeuedSeqs, Q1TailSeqs, VQ}.
+variable_queue_with_holes(VQ0) ->
+    Interval = 2048, %% should match vq:IO_BATCH_SIZE
+    %% enough messages to span more than two index segments plus two
+    %% requeue intervals
+    Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2 * Interval,
+    Seq = lists:seq(1, Count),
+    VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+    VQ2 = variable_queue_publish(
+            false, 1, Count,
+            fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ1),
+    VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+    Acks = lists:reverse(AcksR),
+    AckSeqs = lists:zip(Acks, Seq),
+    %% three interleaved subsets of the acks, offset 0/1/2 within each
+    %% Interval; Subset3 stays pending (these are the "holes")
+    [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] =
+        [lists:unzip(ack_subset(AckSeqs, Interval, I)) || I <- [0, 1, 2]],
+    %% we requeue in three phases in order to exercise requeuing logic
+    %% in various vq states
+    {_MsgIds, VQ4} = rabbit_variable_queue:requeue(
+                       Acks -- (Subset1 ++ Subset2 ++ Subset3), VQ3),
+    VQ5 = requeue_one_by_one(Subset1, VQ4),
+    %% by now we have some messages (and holes) in delta
+    VQ6 = requeue_one_by_one(Subset2, VQ5),
+    VQ7 = variable_queue_set_ram_duration_target(infinity, VQ6),
+    %% add the q1 tail
+    VQ8 = variable_queue_publish(
+            true, Count + 1, Interval,
+            fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7),
+    %% assertions
+    Status = variable_queue_status(VQ8),
+    vq_with_holes_assertions(VQ8, proplists:get_value(mode, Status)),
+    Depth = Count + Interval,
+    Depth = rabbit_variable_queue:depth(VQ8),
+    Len = Depth - length(Subset3),
+    Len = rabbit_variable_queue:len(VQ8),
+    {Seq3, Seq -- Seq3, lists:seq(Count + 1, Count + Interval), VQ8}.
+
+%% After variable_queue_with_holes/1: in default mode q1, delta and q3
+%% must all be non-empty; in lazy mode everything lives in delta, so
+%% only delta is required to be non-empty.  The 'false =' matches fail
+%% the test if any checked component is empty.
+vq_with_holes_assertions(VQ, default) ->
+    [false =
+         case V of
+             {delta, _, 0, _} -> true;
+             0                -> true;
+             _                -> false
+         end || {K, V} <- variable_queue_status(VQ),
+                lists:member(K, [q1, delta, q3])];
+vq_with_holes_assertions(VQ, lazy) ->
+    [false =
+         case V of
+             {delta, _, 0, _} -> true;
+             _                -> false
+         end || {K, V} <- variable_queue_status(VQ),
+                lists:member(K, [delta])].
+
+%% Wait for internal shuffling to finish, then assert the given
+%% property/value pairs against the queue status; returns the settled
+%% queue state.
+check_variable_queue_status(VQ0, Props) ->
+    VQ1 = variable_queue_wait_for_shuffling_end(VQ0),
+    S = variable_queue_status(VQ1),
+    assert_props(S, Props),
+    VQ1.
+
+%% ---------------------------------------------------------------------------
+%% Credit flow.
+%% ---------------------------------------------------------------------------
+
+%% Testcase entry point: run credit_flow_settings1/1 inside broker
+%% node 0 via rpc.
+credit_flow_settings(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, credit_flow_settings1, [Config]).
+
+%% Verify that credit_flow picks up credit_flow_default_credit from the
+%% application env, and falls back to the built-in {200, 50} default
+%% when the setting is absent or unset.
+credit_flow_settings1(_Config) ->
+    %% default values
+    passed = test_proc(200, 50),
+
+    application:set_env(rabbit, credit_flow_default_credit, {100, 20}),
+    passed = test_proc(100, 20),
+
+    application:unset_env(rabbit, credit_flow_default_credit),
+
+    % back to defaults
+    passed = test_proc(200, 50),
+    passed.
+
+%% Spawn a dummy credit_flow process, ask it for the credit setting it
+%% observed, and assert it equals {InitialCredit, MoreCreditAfter}.
+test_proc(InitialCredit, MoreCreditAfter) ->
+    Pid = spawn(fun dummy/0),
+    Pid ! {credit, self()},
+    {InitialCredit, MoreCreditAfter} =
+        receive
+            {credit, Val} -> Val
+        end,
+    passed.
+
+%% Worker for test_proc/2: performing one credit_flow:send/1 caches the
+%% effective default credit in the process dictionary, which is then
+%% reported back to whoever sends {credit, From}.
+dummy() ->
+    credit_flow:send(self()),
+    receive
+        {credit, From} ->
+            From ! {credit, get(credit_flow_default_credit)};
+        _      ->
+            dummy()
+    end.
+
+%% -------------------------------------------------------------------
+%% dynamic_mirroring.
+%% -------------------------------------------------------------------
+
+%% Testcase entry point: run dynamic_mirroring1/1 inside broker node 0
+%% via rpc.
+dynamic_mirroring(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, dynamic_mirroring1, [Config]).
+
+%% Table-driven unit tests of suggested_queue_nodes/5 for the three HA
+%% policies (<<"all">>, <<"nodes">>, <<"exactly">>).  Each Test call
+%% checks {ExpectedMaster, ExpectedSlaves, ExtraSlaveCount} against the
+%% suggestion computed from {Master, Slaves, SyncedSlaves} and the list
+%% of all cluster nodes.
+dynamic_mirroring1(_Config) ->
+    %% Just unit tests of the node selection logic, see multi node
+    %% tests for the rest...
+    Test = fun ({NewM, NewSs, ExtraSs}, Policy, Params,
+                {MNode, SNodes, SSNodes}, All) ->
+                   {ok, M} = rabbit_mirror_queue_misc:module(Policy),
+                   {NewM, NewSs0} = M:suggested_queue_nodes(
+                                      Params, MNode, SNodes, SSNodes, All),
+                   NewSs1 = lists:sort(NewSs0),
+                   case dm_list_match(NewSs, NewSs1, ExtraSs) of
+                       ok    -> ok;
+                       error -> exit({no_match, NewSs, NewSs1, ExtraSs})
+                   end
+           end,
+
+    Test({a,[b,c],0},<<"all">>,'_',{a,[],   []},   [a,b,c]),
+    Test({a,[b,c],0},<<"all">>,'_',{a,[b,c],[b,c]},[a,b,c]),
+    Test({a,[b,c],0},<<"all">>,'_',{a,[d],  [d]},  [a,b,c]),
+
+    %% helper: atom list -> binary list, as the "nodes" policy expects
+    N = fun (Atoms) -> [list_to_binary(atom_to_list(A)) || A <- Atoms] end,
+
+    %% Add a node
+    Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[b],[b]},[a,b,c,d]),
+    Test({b,[a,c],0},<<"nodes">>,N([a,b,c]),{b,[a],[a]},[a,b,c,d]),
+    %% Add two nodes and drop one
+    Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[d],[d]},[a,b,c,d]),
+    %% Don't try to include nodes that are not running
+    Test({a,[b],  0},<<"nodes">>,N([a,b,f]),{a,[b],[b]},[a,b,c,d]),
+    %% If we can't find any of the nodes listed then just keep the master
+    Test({a,[],   0},<<"nodes">>,N([f,g,h]),{a,[b],[b]},[a,b,c,d]),
+    %% And once that's happened, still keep the master even when not listed,
+    %% if nothing is synced
+    Test({a,[b,c],0},<<"nodes">>,N([b,c]),  {a,[], []}, [a,b,c,d]),
+    Test({a,[b,c],0},<<"nodes">>,N([b,c]),  {a,[b],[]}, [a,b,c,d]),
+    %% But if something is synced we can lose the master - but make
+    %% sure we pick the new master from the nodes which are synced!
+    Test({b,[c],  0},<<"nodes">>,N([b,c]),  {a,[b],[b]},[a,b,c,d]),
+    Test({b,[c],  0},<<"nodes">>,N([c,b]),  {a,[b],[b]},[a,b,c,d]),
+
+    Test({a,[],   1},<<"exactly">>,2,{a,[],   []},   [a,b,c,d]),
+    Test({a,[],   2},<<"exactly">>,3,{a,[],   []},   [a,b,c,d]),
+    Test({a,[c],  0},<<"exactly">>,2,{a,[c],  [c]},  [a,b,c,d]),
+    Test({a,[c],  1},<<"exactly">>,3,{a,[c],  [c]},  [a,b,c,d]),
+    Test({a,[c],  0},<<"exactly">>,2,{a,[c,d],[c,d]},[a,b,c,d]),
+    Test({a,[c,d],0},<<"exactly">>,3,{a,[c,d],[c,d]},[a,b,c,d]),
+
+    passed.
+
+%% Does the first list match the second where the second is required
+%% to have exactly Extra superfluous items?  Both lists are assumed
+%% sorted; returns ok or error.
+dm_list_match(Expected, Actual, Extra) ->
+    case {Expected, Actual} of
+        {[], []} when Extra =:= 0 -> ok;
+        {_,  []}                  -> error;
+        {[H | T1], [H | T2]}      -> dm_list_match(T1, T2, Extra);
+        {_,  [_ | T2]}            -> dm_list_match(Expected, T2, Extra - 1)
+    end.
+
+%% ---------------------------------------------------------------------------
+%% file_handle_cache.
+%% ---------------------------------------------------------------------------
+
+%% Testcase entry point: run file_handle_cache1/1 inside broker node 0
+%% via rpc.
+file_handle_cache(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, file_handle_cache1, [Config]).
+
+%% Exercise the file handle cache under a very small handle limit:
+%% copying with one spare handle, and cleanup of the pending-open queue
+%% when blocked processes die.  Restores the original limit at the end.
+file_handle_cache1(_Config) ->
+    %% test copying when there is just one spare handle
+    Limit = file_handle_cache:get_limit(),
+    ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores
+    TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"),
+    ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")),
+    [Src1, Dst1, Src2, Dst2] = Files =
+        [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]],
+    Content = <<"foo">>,
+    %% write Content to Src with prim_file (bypassing the cache), then
+    %% copy it to Dst through the cache and delete both
+    CopyFun = fun (Src, Dst) ->
+                      {ok, Hdl} = prim_file:open(Src, [binary, write]),
+                      ok = prim_file:write(Hdl, Content),
+                      ok = prim_file:sync(Hdl),
+                      prim_file:close(Hdl),
+
+                      {ok, SrcHdl} = file_handle_cache:open(Src, [read], []),
+                      {ok, DstHdl} = file_handle_cache:open(Dst, [write], []),
+                      Size = size(Content),
+                      {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size),
+                      ok = file_handle_cache:delete(SrcHdl),
+                      ok = file_handle_cache:delete(DstHdl)
+              end,
+    Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open(
+                                        filename:join(TmpDir, "file5"),
+                                        [write], []),
+                          receive {next, Pid1} -> Pid1 ! {next, self()} end,
+                          file_handle_cache:delete(Hdl),
+                          %% This will block and never return, so we
+                          %% exercise the fhc tidying up the pending
+                          %% queue on the death of a process.
+                          ok = CopyFun(Src1, Dst1)
+                end),
+    ok = CopyFun(Src1, Dst1),
+    ok = file_handle_cache:set_limit(2),
+    Pid ! {next, self()},
+    receive {next, Pid} -> ok end,
+    timer:sleep(100),
+    Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end),
+    timer:sleep(100),
+    erlang:monitor(process, Pid),
+    erlang:monitor(process, Pid1),
+    exit(Pid, kill),
+    exit(Pid1, kill),
+    receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end,
+    receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end,
+    [file:delete(File) || File <- Files],
+    ok = file_handle_cache:set_limit(Limit),
+    passed.
+
+%% -------------------------------------------------------------------
+%% Log management.
+%% -------------------------------------------------------------------
+
+%% Testcase entry point: run log_management1/1 inside broker node 0
+%% via rpc.
+log_management(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, log_management1, [Config]).
+
+%% Exercise "rabbitmqctl rotate_logs" in all its variants: plain
+%% reopen, rotation with a suffix, rotation after external renames,
+%% rotation of empty/non-writable files, and rotation while logging is
+%% redirected to tty or disabled.  Restores file logging at the end.
+log_management1(_Config) ->
+    override_group_leader(),
+
+    MainLog = rabbit:log_location(kernel),
+    SaslLog = rabbit:log_location(sasl),
+    Suffix = ".1",
+
+    ok = test_logs_working(MainLog, SaslLog),
+
+    %% prepare basic logs
+    file:delete([MainLog, Suffix]),
+    file:delete([SaslLog, Suffix]),
+
+    %% simple logs reopening
+    ok = control_action(rotate_logs, []),
+    ok = test_logs_working(MainLog, SaslLog),
+
+    %% simple log rotation
+    ok = control_action(rotate_logs, [Suffix]),
+    [true, true] = non_empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
+    [true, true] = empty_files([MainLog, SaslLog]),
+    ok = test_logs_working(MainLog, SaslLog),
+
+    %% reopening logs with log rotation performed first
+    ok = clean_logs([MainLog, SaslLog], Suffix),
+    ok = control_action(rotate_logs, []),
+    ok = file:rename(MainLog, [MainLog, Suffix]),
+    ok = file:rename(SaslLog, [SaslLog, Suffix]),
+    ok = test_logs_working([MainLog, Suffix], [SaslLog, Suffix]),
+    ok = control_action(rotate_logs, []),
+    ok = test_logs_working(MainLog, SaslLog),
+
+    %% log rotation on empty files (the main log will have a ctl action logged)
+    ok = clean_logs([MainLog, SaslLog], Suffix),
+    ok = control_action(rotate_logs, []),
+    ok = control_action(rotate_logs, [Suffix]),
+    [false, true] = empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
+
+    %% logs with suffix are not writable
+    ok = control_action(rotate_logs, [Suffix]),
+    ok = make_files_non_writable([[MainLog, Suffix], [SaslLog, Suffix]]),
+    ok = control_action(rotate_logs, [Suffix]),
+    ok = test_logs_working(MainLog, SaslLog),
+
+    %% logging directed to tty (first, remove handlers)
+    ok = delete_log_handlers([rabbit_sasl_report_file_h,
+                              rabbit_error_logger_file_h]),
+    ok = clean_logs([MainLog, SaslLog], Suffix),
+    ok = application:set_env(rabbit, sasl_error_logger, tty),
+    ok = application:set_env(rabbit, error_logger, tty),
+    ok = control_action(rotate_logs, []),
+    [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
+
+    %% rotate logs when logging is turned off
+    ok = application:set_env(rabbit, sasl_error_logger, false),
+    ok = application:set_env(rabbit, error_logger, silent),
+    ok = control_action(rotate_logs, []),
+    [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
+
+    %% cleanup
+    ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
+    ok = application:set_env(rabbit, error_logger, {file, MainLog}),
+    ok = add_log_handlers([{rabbit_error_logger_file_h, MainLog},
+                           {rabbit_sasl_report_file_h, SaslLog}]),
+    passed.
+
+%% Testcase entry point: run log_management_during_startup1/1 inside
+%% broker node 0 via rpc.
+log_management_during_startup(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, log_management_during_startup1, [Config]).
+
+%% Start/stop the rabbit application under various broken logging
+%% configurations (tty handlers missing, unwritable directories,
+%% missing parent dirs) and assert the specific startup errors.
+%% Each section stops the app, mutates the logging env/handlers, then
+%% checks whether start_app succeeds or fails as expected.
+log_management_during_startup1(_Config) ->
+    MainLog = rabbit:log_location(kernel),
+    SaslLog = rabbit:log_location(sasl),
+
+    %% start application with simple tty logging
+    ok = control_action(stop_app, []),
+    ok = application:set_env(rabbit, error_logger, tty),
+    ok = application:set_env(rabbit, sasl_error_logger, tty),
+    ok = add_log_handlers([{error_logger_tty_h, []},
+                           {sasl_report_tty_h, []}]),
+    ok = control_action(start_app, []),
+
+    %% start application with tty logging and
+    %% proper handlers not installed
+    ok = control_action(stop_app, []),
+    ok = error_logger:tty(false),
+    ok = delete_log_handlers([sasl_report_tty_h]),
+    ok = case catch control_action(start_app, []) of
+             ok -> exit({got_success_but_expected_failure,
+                         log_rotation_tty_no_handlers_test});
+             {badrpc, {'EXIT', {error,
+                                {cannot_log_to_tty, _, not_installed}}}} -> ok
+         end,
+
+    %% fix sasl logging
+    ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
+
+    %% start application with logging to non-existing directory
+    TmpLog = "/tmp/rabbit-tests/test.log",
+    delete_file(TmpLog),
+    ok = control_action(stop_app, []),
+    ok = application:set_env(rabbit, error_logger, {file, TmpLog}),
+
+    ok = delete_log_handlers([rabbit_error_logger_file_h]),
+    ok = add_log_handlers([{error_logger_file_h, MainLog}]),
+    ok = control_action(start_app, []),
+
+    %% start application with logging to directory with no
+    %% write permissions
+    ok = control_action(stop_app, []),
+    TmpDir = "/tmp/rabbit-tests",
+    ok = set_permissions(TmpDir, 8#00400),
+    ok = delete_log_handlers([rabbit_error_logger_file_h]),
+    ok = add_log_handlers([{error_logger_file_h, MainLog}]),
+    ok = case control_action(start_app, []) of
+             ok -> exit({got_success_but_expected_failure,
+                         log_rotation_no_write_permission_dir_test});
+             {badrpc, {'EXIT',
+                       {error, {cannot_log_to_file, _, _}}}} -> ok
+         end,
+
+    %% start application with logging to a subdirectory which
+    %% parent directory has no write permissions
+    ok = control_action(stop_app, []),
+    TmpTestDir = "/tmp/rabbit-tests/no-permission/test/log",
+    ok = application:set_env(rabbit, error_logger, {file, TmpTestDir}),
+    ok = add_log_handlers([{error_logger_file_h, MainLog}]),
+    ok = case control_action(start_app, []) of
+             ok -> exit({got_success_but_expected_failure,
+                         log_rotatation_parent_dirs_test});
+             {badrpc,
+              {'EXIT',
+               {error, {cannot_log_to_file, _,
+                        {error,
+                         {cannot_create_parent_dirs, _, eacces}}}}}} -> ok
+         end,
+    %% restore permissions and clean up the scratch directory
+    ok = set_permissions(TmpDir, 8#00700),
+    ok = set_permissions(TmpLog, 8#00600),
+    ok = delete_file(TmpLog),
+    ok = file:del_dir(TmpDir),
+
+    %% start application with standard error_logger_file_h
+    %% handler not installed
+    ok = control_action(stop_app, []),
+    ok = application:set_env(rabbit, error_logger, {file, MainLog}),
+    ok = control_action(start_app, []),
+
+    %% start application with standard sasl handler not installed
+    %% and rabbit main log handler installed correctly
+    ok = control_action(stop_app, []),
+    ok = delete_log_handlers([rabbit_sasl_report_file_h]),
+    ok = control_action(start_app, []),
+    passed.
+
+%% "rabbitmqctl rotate_logs" without additional parameters
+%% shouldn't truncate files.
+%% Testcase entry point: runs rotate_logs_without_suffix1/1 on node 0.
+rotate_logs_without_suffix(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, rotate_logs_without_suffix1, [Config]).
+
+%% Check that a bare rotate_logs merely reopens the log files: neither
+%% externally rotated files still held open by the server nor the newly
+%% created files may be truncated, and new records must land in the new
+%% files.
+rotate_logs_without_suffix1(_Config) ->
+    override_group_leader(),
+
+    MainLog = rabbit:log_location(kernel),
+    SaslLog = rabbit:log_location(sasl),
+    Suffix = ".1",
+    file:delete(MainLog),
+    file:delete(SaslLog),
+
+    %% Empty log-files should be created
+    ok = control_action(rotate_logs, []),
+    [true, true] = empty_files([MainLog, SaslLog]),
+
+    %% Write something to log files and simulate external log rotation
+    ok = test_logs_working(MainLog, SaslLog),
+    ok = file:rename(MainLog, [MainLog, Suffix]),
+    ok = file:rename(SaslLog, [SaslLog, Suffix]),
+
+    %% Create non-empty files
+    TestData = "test-data\n",
+    file:write_file(MainLog, TestData),
+    file:write_file(SaslLog, TestData),
+
+    %% Nothing should be truncated - neither moved files which are still
+    %% opened by server, nor new log files that should be just reopened.
+    ok = control_action(rotate_logs, []),
+    [true, true, true, true] =
+        non_empty_files([MainLog, SaslLog, [MainLog, Suffix],
+            [SaslLog, Suffix]]),
+
+    %% And log files should be re-opened - new log records should go to
+    %% new files.
+    ok = test_logs_working(MainLog, SaslLog),
+    true = (rabbit_file:file_size(MainLog) > length(TestData)),
+    true = (rabbit_file:file_size(SaslLog) > length(TestData)),
+    passed.
+
+%% Override group leader, otherwise SASL fake events are ignored by
+%% the error_logger local to RabbitMQ.  Adopts the rabbit process's
+%% group leader for the calling test process.
+override_group_leader() ->
+    {group_leader, Leader} = erlang:process_info(whereis(rabbit), group_leader),
+    erlang:group_leader(Leader, self()).
+
+%% For each path return true/false for "file exists and has size 0",
+%% or the file:read_file_info/1 error tuple if it cannot be inspected.
+empty_files(Files) ->
+    lists:map(
+      fun (File) ->
+              case file:read_file_info(File) of
+                  {ok, #file_info{size = Size}} -> Size == 0;
+                  Error                         -> Error
+              end
+      end, Files).
+
+%% Negation of empty_files/1 per entry; read errors are passed through
+%% unchanged.
+non_empty_files(Files) ->
+    lists:map(fun ({error, _} = Error) -> Error;
+                  (IsEmpty)            -> not IsEmpty
+              end, empty_files(Files)).
+
+%% Emit one record into each of the main and SASL logs and assert both
+%% files end up non-empty (i.e. logging is live on both channels).
+test_logs_working(MainLogFile, SaslLogFile) ->
+    ok = rabbit_log:error("Log a test message~n"),
+    ok = error_logger:error_report(crash_report, [fake_crash_report, ?MODULE]),
+    %% give the error loggers some time to catch up
+    timer:sleep(100),
+    [true, true] = non_empty_files([MainLogFile, SaslLogFile]),
+    ok.
+
+%% Set the unix mode bits of Path, preserving the rest of its
+%% file_info; passes through any read_file_info error.
+set_permissions(Path, Mode) ->
+    case file:read_file_info(Path) of
+        {ok, FInfo} -> file:write_file_info(
+                         Path,
+                         FInfo#file_info{mode=Mode});
+        Error       -> Error
+    end.
+
+%% Delete each log file together with its rotated "<File><Suffix>"
+%% companion; missing files are tolerated (delete_file/1 maps enoent
+%% to ok).
+clean_logs(Files, Suffix) ->
+    lists:foreach(fun (File) ->
+                          ok = delete_file(File),
+                          ok = delete_file([File, Suffix])
+                  end, Files),
+    ok.
+
+%% Exit unless the local node is an mnesia RAM node.
+assert_ram_node() ->
+    case rabbit_mnesia:node_type() of
+        disc -> exit('not_ram_node');
+        ram  -> ok
+    end.
+
+%% Exit unless the local node is an mnesia disc node.
+assert_disc_node() ->
+    case rabbit_mnesia:node_type() of
+        disc -> ok;
+        ram  -> exit('not_disc_node')
+    end.
+
+%% Delete File, treating "already gone" (enoent) as success; any other
+%% error tuple is returned to the caller.
+delete_file(File) ->
+    case file:delete(File) of
+        {error, enoent} -> ok;
+        OkOrError       -> OkOrError
+    end.
+
+%% Chmod every file to read-only (0444), asserting each write_file_info
+%% succeeds.
+make_files_non_writable(Files) ->
+    [ok = file:write_file_info(File, #file_info{mode=8#444}) ||
+        File <- Files],
+    ok.
+
+%% Install each {Handler, Args} pair as an error_logger report handler,
+%% asserting every installation succeeds.
+add_log_handlers(Handlers) ->
+    [ok = error_logger:add_report_handler(Handler, Args) ||
+        {Handler, Args} <- Handlers],
+    ok.
+
+%% sasl_report_file_h returns [] during terminate
+%% see: https://github.com/erlang/otp/blob/maint/lib/stdlib/src/error_logger_file_h.erl#L98
+%%
+%% error_logger_file_h returns ok since OTP 18.1
+%% see: https://github.com/erlang/otp/blob/maint/lib/stdlib/src/error_logger_file_h.erl#L98
+%%
+%% NOTE(review): both links above point to error_logger_file_h although
+%% the first claim concerns sasl_report_file_h - verify the intended URL.
+%% Remove each handler, accepting either ok or [] as a success value.
+delete_log_handlers(Handlers) ->
+    [ok_or_empty_list(error_logger:delete_report_handler(Handler))
+     || Handler <- Handlers],
+    ok.
+
+%% Accept the two success shapes returned by
+%% error_logger:delete_report_handler/1 (ok, or [] from handlers whose
+%% terminate returns an empty list); anything else is a function_clause
+%% error, failing the test.
+ok_or_empty_list(Result) when Result =:= ok; Result =:= [] ->
+    Result.
+
+%% ---------------------------------------------------------------------------
+%% Password hashing.
+%% ---------------------------------------------------------------------------
+
+%% Testcase entry point: run password_hashing1/1 inside broker node 0
+%% via rpc.
+password_hashing(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, password_hashing1, [Config]).
+
+%% Verify hashing-module resolution: the application env selects the
+%% default module (sha256 out of the box), explicit module names pass
+%% through, and undefined falls back to the legacy md5 module - both
+%% via rabbit_password:hashing_mod/0,1 and per-user via
+%% hashing_module_for_user/1 on #internal_user{} records.
+password_hashing1(_Config) ->
+    rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(),
+    application:set_env(rabbit, password_hashing_module,
+                        rabbit_password_hashing_md5),
+    rabbit_password_hashing_md5    = rabbit_password:hashing_mod(),
+    application:set_env(rabbit, password_hashing_module,
+                        rabbit_password_hashing_sha256),
+    rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(),
+
+    rabbit_password_hashing_sha256 =
+        rabbit_password:hashing_mod(rabbit_password_hashing_sha256),
+    rabbit_password_hashing_md5    =
+        rabbit_password:hashing_mod(rabbit_password_hashing_md5),
+    %% unknown/undefined module name falls back to md5 (legacy default)
+    rabbit_password_hashing_md5    =
+        rabbit_password:hashing_mod(undefined),
+
+    rabbit_password_hashing_md5    =
+        rabbit_auth_backend_internal:hashing_module_for_user(
+          #internal_user{}),
+    rabbit_password_hashing_md5    =
+        rabbit_auth_backend_internal:hashing_module_for_user(
+          #internal_user{
+             hashing_algorithm = undefined
+            }),
+    rabbit_password_hashing_md5    =
+        rabbit_auth_backend_internal:hashing_module_for_user(
+          #internal_user{
+             hashing_algorithm = rabbit_password_hashing_md5
+            }),
+
+    rabbit_password_hashing_sha256 =
+        rabbit_auth_backend_internal:hashing_module_for_user(
+          #internal_user{
+             hashing_algorithm = rabbit_password_hashing_sha256
+            }),
+
+    passed.
+
+%% Delegates to change_password1/1 on broker node 0 via RPC; the
+%% remote run must return 'passed'.
+change_password(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, change_password1, [Config]),
+    passed = Result.
+
+%% Remote body of change_password/1: a user added while MD5 hashing is
+%% configured can still authenticate after the default hashing module is
+%% switched to SHA-256, and changing the password re-hashes it under the
+%% new scheme while invalidating the old credential.
+change_password1(_Config) ->
+    UserName = <<"test_user">>,
+    Password = <<"test_password">>,
+    %% Start from a clean slate in case an earlier run left the user behind.
+    case rabbit_auth_backend_internal:lookup_user(UserName) of
+        {ok, _} -> rabbit_auth_backend_internal:delete_user(UserName);
+        _       -> ok
+    end,
+    ok = application:set_env(rabbit, password_hashing_module,
+                             rabbit_password_hashing_md5),
+    ok = rabbit_auth_backend_internal:add_user(UserName, Password),
+    {ok, #auth_user{username = UserName}} =
+        rabbit_auth_backend_internal:user_login_authentication(
+            UserName, [{password, Password}]),
+    %% Switching the default hashing module must not break existing users.
+    ok = application:set_env(rabbit, password_hashing_module,
+                             rabbit_password_hashing_sha256),
+    {ok, #auth_user{username = UserName}} =
+        rabbit_auth_backend_internal:user_login_authentication(
+            UserName, [{password, Password}]),
+
+    NewPassword = <<"test_password1">>,
+    ok = rabbit_auth_backend_internal:change_password(UserName, NewPassword),
+    {ok, #auth_user{username = UserName}} =
+        rabbit_auth_backend_internal:user_login_authentication(
+            UserName, [{password, NewPassword}]),
+
+    %% The old password must now be refused.
+    {refused, _, [UserName]} =
+        rabbit_auth_backend_internal:user_login_authentication(
+            UserName, [{password, Password}]),
+    passed.
+
+%% -------------------------------------------------------------------
+%% rabbitmqctl.
+%% -------------------------------------------------------------------
+
+%% Delegates to list_operations_timeout_pass1/1 on broker node 0 via
+%% RPC; the remote run must return 'passed'.
+list_operations_timeout_pass(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, list_operations_timeout_pass1, [Config]),
+    passed = Result.
+
+%% Remote body of list_operations_timeout_pass/1: every rabbitmqctl
+%% "list_*" operation must succeed within ?TIMEOUT_LIST_OPS_PASS when
+%% there are users, parameters, vhosts, policies, queues, exchanges,
+%% bindings, connections, consumers and channels to report.
+list_operations_timeout_pass1(Config) ->
+    %% create a few things so there is some useful information to list
+    {_Writer1, Limiter1, Ch1} = rabbit_ct_broker_helpers:test_channel(),
+    {_Writer2, Limiter2, Ch2} = rabbit_ct_broker_helpers:test_channel(),
+
+    [Q, Q2] = [Queue || Name <- [<<"list_operations_timeout_pass-q1">>,
+                                 <<"list_operations_timeout_pass-q2">>],
+                        {new, Queue = #amqqueue{}} <-
+                            [rabbit_amqqueue:declare(
+                               rabbit_misc:r(<<"/">>, queue, Name),
+                               false, false, [], none)]],
+
+    ok = rabbit_amqqueue:basic_consume(
+           Q, true, Ch1, Limiter1, false, 0, <<"ctag1">>, true, [],
+           undefined),
+    ok = rabbit_amqqueue:basic_consume(
+           Q2, true, Ch2, Limiter2, false, 0, <<"ctag2">>, true, [],
+           undefined),
+
+    %% list users
+    ok = control_action(add_user,
+      ["list_operations_timeout_pass-user",
+       "list_operations_timeout_pass-password"]),
+    {error, {user_already_exists, _}} =
+        control_action(add_user,
+          ["list_operations_timeout_pass-user",
+           "list_operations_timeout_pass-password"]),
+    ok = control_action_t(list_users, [], ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list parameters
+    ok = dummy_runtime_parameters:register(),
+    ok = control_action(set_parameter, ["test", "good", "123"]),
+    ok = control_action_t(list_parameters, [], ?TIMEOUT_LIST_OPS_PASS),
+    ok = control_action(clear_parameter, ["test", "good"]),
+    dummy_runtime_parameters:unregister(),
+
+    %% list vhosts
+    ok = control_action(add_vhost, ["/list_operations_timeout_pass-vhost"]),
+    {error, {vhost_already_exists, _}} =
+        control_action(add_vhost, ["/list_operations_timeout_pass-vhost"]),
+    ok = control_action_t(list_vhosts, [], ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list permissions
+    ok = control_action(set_permissions,
+      ["list_operations_timeout_pass-user", ".*", ".*", ".*"],
+      [{"-p", "/list_operations_timeout_pass-vhost"}]),
+    ok = control_action_t(list_permissions, [],
+      [{"-p", "/list_operations_timeout_pass-vhost"}],
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list user permissions
+    ok = control_action_t(list_user_permissions,
+      ["list_operations_timeout_pass-user"],
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list policies
+    ok = control_action_opts(
+      ["set_policy", "list_operations_timeout_pass-policy", ".*",
+       "{\"ha-mode\":\"all\"}"]),
+    ok = control_action_t(list_policies, [], ?TIMEOUT_LIST_OPS_PASS),
+    ok = control_action(clear_policy, ["list_operations_timeout_pass-policy"]),
+
+    %% list queues
+    ok = info_action_t(list_queues,
+      rabbit_amqqueue:info_keys(), false,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list exchanges
+    ok = info_action_t(list_exchanges,
+      rabbit_exchange:info_keys(), true,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list bindings
+    ok = info_action_t(list_bindings,
+      rabbit_binding:info_keys(), true,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list connections: open two raw AMQP 0-9-1 connections first
+    H = ?config(rmq_hostname, Config),
+    P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+    {ok, C1} = gen_tcp:connect(H, P, [binary, {active, false}]),
+    gen_tcp:send(C1, <<"AMQP", 0, 0, 9, 1>>),
+    {ok, <<1,0,0>>} = gen_tcp:recv(C1, 3, 100),
+
+    {ok, C2} = gen_tcp:connect(H, P, [binary, {active, false}]),
+    gen_tcp:send(C2, <<"AMQP", 0, 0, 9, 1>>),
+    {ok, <<1,0,0>>} = gen_tcp:recv(C2, 3, 100),
+
+    ok = info_action_t(
+      list_connections, rabbit_networking:connection_info_keys(), false,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list consumers
+    ok = info_action_t(
+      list_consumers, rabbit_amqqueue:consumer_info_keys(), false,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list channels
+    ok = info_action_t(
+      list_channels, rabbit_channel:info_keys(), false,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% do some cleaning up
+    ok = control_action(delete_user, ["list_operations_timeout_pass-user"]),
+    {error, {no_such_user, _}} =
+        control_action(delete_user, ["list_operations_timeout_pass-user"]),
+
+    ok = control_action(delete_vhost, ["/list_operations_timeout_pass-vhost"]),
+    {error, {no_such_vhost, _}} =
+        control_action(delete_vhost, ["/list_operations_timeout_pass-vhost"]),
+
+    %% close_connection
+    Conns = rabbit_ct_broker_helpers:get_connection_pids([C1, C2]),
+    [ok, ok] = [ok = control_action(
+        close_connection, [rabbit_misc:pid_to_string(ConnPid), "go away"])
+     || ConnPid <- Conns],
+
+    %% cleanup queues
+    [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]],
+
+    [begin
+         unlink(Chan),
+         ok = rabbit_channel:shutdown(Chan)
+     end || Chan <- [Ch1, Ch2]],
+    passed.
+
+%% Delegates to user_management1/1 on broker node 0 via RPC; the
+%% remote run must return 'passed'.
+user_management(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, user_management1, [Config]),
+    passed = Result.
+
+%% Remote body of user_management/1: full lifecycle of users, vhosts,
+%% permissions and authentication through rabbitmqctl-style
+%% control_action calls, including the expected error returns for
+%% missing/duplicate entities and invalid regexps.
+user_management1(_Config) ->
+
+    %% lots of stuff that should fail
+    {error, {no_such_user, _}} =
+        control_action(delete_user,
+          ["user_management-user"]),
+    {error, {no_such_user, _}} =
+        control_action(change_password,
+          ["user_management-user", "user_management-password"]),
+    {error, {no_such_vhost, _}} =
+        control_action(delete_vhost,
+          ["/user_management-vhost"]),
+    {error, {no_such_user, _}} =
+        control_action(set_permissions,
+          ["user_management-user", ".*", ".*", ".*"]),
+    {error, {no_such_user, _}} =
+        control_action(clear_permissions,
+          ["user_management-user"]),
+    {error, {no_such_user, _}} =
+        control_action(list_user_permissions,
+          ["user_management-user"]),
+    {error, {no_such_vhost, _}} =
+        control_action(list_permissions, [],
+          [{"-p", "/user_management-vhost"}]),
+    {error, {invalid_regexp, _, _}} =
+        control_action(set_permissions,
+          ["guest", "+foo", ".*", ".*"]),
+    {error, {no_such_user, _}} =
+        control_action(set_user_tags,
+          ["user_management-user", "bar"]),
+
+    %% user creation
+    ok = control_action(add_user,
+      ["user_management-user", "user_management-password"]),
+    {error, {user_already_exists, _}} =
+        control_action(add_user,
+          ["user_management-user", "user_management-password"]),
+    ok = control_action(clear_password,
+      ["user_management-user"]),
+    ok = control_action(change_password,
+      ["user_management-user", "user_management-newpassword"]),
+
+    %% set_user_tags round-trips: tags must show up in the stored user.
+    TestTags = fun (Tags) ->
+                       Args = ["user_management-user" | [atom_to_list(T) || T <- Tags]],
+                       ok = control_action(set_user_tags, Args),
+                       {ok, #internal_user{tags = Tags}} =
+                           rabbit_auth_backend_internal:lookup_user(
+                             <<"user_management-user">>),
+                       ok = control_action(list_users, [])
+               end,
+    TestTags([foo, bar, baz]),
+    TestTags([administrator]),
+    TestTags([]),
+
+    %% user authentication
+    ok = control_action(authenticate_user,
+      ["user_management-user", "user_management-newpassword"]),
+    {refused, _User, _Format, _Params} =
+        control_action(authenticate_user,
+          ["user_management-user", "user_management-password"]),
+
+    %% vhost creation
+    ok = control_action(add_vhost,
+      ["/user_management-vhost"]),
+    {error, {vhost_already_exists, _}} =
+        control_action(add_vhost,
+          ["/user_management-vhost"]),
+    ok = control_action(list_vhosts, []),
+
+    %% user/vhost mapping (setting permissions is idempotent)
+    ok = control_action(set_permissions,
+      ["user_management-user", ".*", ".*", ".*"],
+      [{"-p", "/user_management-vhost"}]),
+    ok = control_action(set_permissions,
+      ["user_management-user", ".*", ".*", ".*"],
+      [{"-p", "/user_management-vhost"}]),
+    ok = control_action(set_permissions,
+      ["user_management-user", ".*", ".*", ".*"],
+      [{"-p", "/user_management-vhost"}]),
+    ok = control_action(list_permissions, [],
+      [{"-p", "/user_management-vhost"}]),
+    ok = control_action(list_permissions, [],
+      [{"-p", "/user_management-vhost"}]),
+    ok = control_action(list_user_permissions,
+      ["user_management-user"]),
+
+    %% user/vhost unmapping (clearing permissions is idempotent too)
+    ok = control_action(clear_permissions,
+      ["user_management-user"], [{"-p", "/user_management-vhost"}]),
+    ok = control_action(clear_permissions,
+      ["user_management-user"], [{"-p", "/user_management-vhost"}]),
+
+    %% vhost deletion
+    ok = control_action(delete_vhost,
+      ["/user_management-vhost"]),
+    {error, {no_such_vhost, _}} =
+        control_action(delete_vhost,
+          ["/user_management-vhost"]),
+
+    %% deleting a populated vhost (it holds a durable queue) must work
+    ok = control_action(add_vhost,
+      ["/user_management-vhost"]),
+    ok = control_action(set_permissions,
+      ["user_management-user", ".*", ".*", ".*"],
+      [{"-p", "/user_management-vhost"}]),
+    {new, _} = rabbit_amqqueue:declare(
+                 rabbit_misc:r(<<"/user_management-vhost">>, queue,
+                               <<"user_management-vhost-queue">>),
+                 true, false, [], none),
+    ok = control_action(delete_vhost,
+      ["/user_management-vhost"]),
+
+    %% user deletion
+    ok = control_action(delete_user,
+      ["user_management-user"]),
+    {error, {no_such_user, _}} =
+        control_action(delete_user,
+          ["user_management-user"]),
+
+    passed.
+
+%% Delegates to runtime_parameters1/1 on broker node 0 via RPC; the
+%% remote run must return 'passed'.
+runtime_parameters(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, runtime_parameters1, [Config]),
+    passed = Result.
+
+%% Remote body of runtime_parameters/1: set_parameter accepts only
+%% valid JSON values admitted by the component's validator;
+%% clear_parameter works even after the component is unregistered.
+runtime_parameters1(_Config) ->
+    dummy_runtime_parameters:register(),
+    Good = fun(L) -> ok                = control_action(set_parameter, L) end,
+    Bad  = fun(L) -> {error_string, _} = control_action(set_parameter, L) end,
+
+    %% Acceptable for bijection
+    Good(["test", "good", "\"ignore\""]),
+    Good(["test", "good", "123"]),
+    Good(["test", "good", "true"]),
+    Good(["test", "good", "false"]),
+    Good(["test", "good", "null"]),
+    Good(["test", "good", "{\"key\": \"value\"}"]),
+
+    %% Invalid json
+    Bad(["test", "good", "atom"]),
+    Bad(["test", "good", "{\"foo\": \"bar\""]),
+    Bad(["test", "good", "{foo: \"bar\"}"]),
+
+    %% Test actual validation hook
+    Good(["test", "maybe", "\"good\""]),
+    Bad(["test", "maybe", "\"bad\""]),
+    Good(["test", "admin", "\"ignore\""]), %% ctl means 'user' -> none
+
+    ok = control_action(list_parameters, []),
+
+    ok = control_action(clear_parameter, ["test", "good"]),
+    ok = control_action(clear_parameter, ["test", "maybe"]),
+    ok = control_action(clear_parameter, ["test", "admin"]),
+    {error_string, _} =
+        control_action(clear_parameter, ["test", "neverexisted"]),
+
+    %% We can delete for a component that no longer exists
+    Good(["test", "good", "\"ignore\""]),
+    dummy_runtime_parameters:unregister(),
+    ok = control_action(clear_parameter, ["test", "good"]),
+    passed.
+
+%% Delegates to policy_validation1/1 on broker node 0 via RPC; the
+%% remote run must return 'passed'.
+policy_validation(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, policy_validation1, [Config]),
+    passed = Result.
+
+%% Remote body of policy_validation/1: the dummy policy validator
+%% accepts "testeven" lists of even length and "testpos" lists of
+%% positive numbers, and rejects anything else with {error_string, _}.
+policy_validation1(_Config) ->
+    PolicyName = "runtime_parameters-policy",
+    dummy_runtime_parameters:register_policy_validator(),
+    SetPol = fun (Key, Val) ->
+                     control_action_opts(
+                       ["set_policy", PolicyName, ".*",
+                        rabbit_misc:format("{\"~s\":~p}", [Key, Val])])
+             end,
+    %% Accepted values must also be visible in the stored definition.
+    OK     = fun (Key, Val) ->
+                 ok = SetPol(Key, Val),
+                 true = does_policy_exist(PolicyName,
+                   [{definition, [{list_to_binary(Key), Val}]}])
+             end,
+
+    OK("testeven", []),
+    OK("testeven", [1, 2]),
+    OK("testeven", [1, 2, 3, 4]),
+    OK("testpos",  [2, 5, 5678]),
+
+    {error_string, _} = SetPol("testpos",  [-1, 0, 1]),
+    {error_string, _} = SetPol("testeven", [ 1, 2, 3]),
+
+    ok = control_action(clear_policy, [PolicyName]),
+    dummy_runtime_parameters:unregister_policy_validator(),
+    passed.
+
+%% Delegates to policy_opts_validation1/1 on broker node 0 via RPC; the
+%% remote run must return 'passed'.
+policy_opts_validation(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, policy_opts_validation1, [Config]),
+    passed = Result.
+
+%% Remote body of policy_opts_validation/1: validates the --priority and
+%% --apply-to options of set_policy, including their defaults and the
+%% several distinct error shapes a bad invocation can produce.
+policy_opts_validation1(_Config) ->
+    PolicyName = "policy_opts_validation-policy",
+    Set  = fun (Extra) -> control_action_opts(
+                            ["set_policy", PolicyName,
+                             ".*", "{\"ha-mode\":\"all\"}"
+                             | Extra]) end,
+    OK   = fun (Extra, Props) ->
+               ok = Set(Extra),
+               true = does_policy_exist(PolicyName, Props)
+           end,
+    %% Failures surface in three ways: an error string, 'no_command'
+    %% when an option is given with no value, or an EXIT from
+    %% rabbit_control_main for the unsupported --offline flag.
+    Fail = fun (Extra) ->
+            case Set(Extra) of
+                {error_string, _} -> ok;
+                no_command when Extra =:= ["--priority"] -> ok;
+                no_command when Extra =:= ["--apply-to"] -> ok;
+                {'EXIT',
+                 {function_clause,
+                  [{rabbit_control_main,action, _, _} | _]}}
+                when Extra =:= ["--offline"] -> ok
+          end
+    end,
+
+    %% Defaults: priority 0, apply-to "all".
+    OK  ([], [{priority, 0}, {'apply-to', <<"all">>}]),
+
+    OK  (["--priority", "0"], [{priority, 0}]),
+    OK  (["--priority", "3"], [{priority, 3}]),
+    Fail(["--priority", "banana"]),
+    Fail(["--priority"]),
+
+    OK  (["--apply-to", "all"],    [{'apply-to', <<"all">>}]),
+    OK  (["--apply-to", "queues"], [{'apply-to', <<"queues">>}]),
+    Fail(["--apply-to", "bananas"]),
+    Fail(["--apply-to"]),
+
+    OK  (["--priority", "3",      "--apply-to", "queues"], [{priority, 3}, {'apply-to', <<"queues">>}]),
+    Fail(["--priority", "banana", "--apply-to", "queues"]),
+    Fail(["--priority", "3",      "--apply-to", "bananas"]),
+
+    Fail(["--offline"]),
+
+    ok = control_action(clear_policy, [PolicyName]),
+    passed.
+
+%% Delegates to ha_policy_validation1/1 on broker node 0 via RPC; the
+%% remote run must return 'passed'.
+ha_policy_validation(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, ha_policy_validation1, [Config]),
+    passed = Result.
+
+%% Remote body of ha_policy_validation/1: checks the ha-mode /
+%% ha-params / ha-sync-mode combinations the mirroring policy validator
+%% accepts and rejects.
+ha_policy_validation1(_Config) ->
+    PolicyName = "ha_policy_validation-policy",
+    Set  = fun (JSON) -> control_action_opts(
+                           ["set_policy", PolicyName,
+                            ".*", JSON]) end,
+    OK   = fun (JSON, Def) ->
+               ok = Set(JSON),
+               true = does_policy_exist(PolicyName, [{definition, Def}])
+           end,
+    Fail = fun (JSON) -> {error_string, _} = Set(JSON) end,
+
+    OK  ("{\"ha-mode\":\"all\"}", [{<<"ha-mode">>, <<"all">>}]),
+    Fail("{\"ha-mode\":\"made_up\"}"),
+
+    %% "nodes" mode requires a non-empty list of node name strings.
+    Fail("{\"ha-mode\":\"nodes\"}"),
+    Fail("{\"ha-mode\":\"nodes\",\"ha-params\":2}"),
+    Fail("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",2]}"),
+    OK  ("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",\"b\"]}",
+      [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [<<"a">>, <<"b">>]}]),
+    Fail("{\"ha-params\":[\"a\",\"b\"]}"),
+
+    %% "exactly" mode requires an integer mirror count.
+    Fail("{\"ha-mode\":\"exactly\"}"),
+    Fail("{\"ha-mode\":\"exactly\",\"ha-params\":[\"a\",\"b\"]}"),
+    OK  ("{\"ha-mode\":\"exactly\",\"ha-params\":2}",
+      [{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}]),
+    Fail("{\"ha-params\":2}"),
+
+    %% ha-sync-mode is only valid alongside an ha-mode.
+    OK  ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"manual\"}",
+      [{<<"ha-mode">>, <<"all">>}, {<<"ha-sync-mode">>, <<"manual">>}]),
+    OK  ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}",
+      [{<<"ha-mode">>, <<"all">>}, {<<"ha-sync-mode">>, <<"automatic">>}]),
+    Fail("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"made_up\"}"),
+    Fail("{\"ha-sync-mode\":\"manual\"}"),
+    Fail("{\"ha-sync-mode\":\"automatic\"}"),
+
+    ok = control_action(clear_policy, [PolicyName]),
+    passed.
+
+%% Delegates to queue_master_location_policy_validation1/1 on broker
+%% node 0 via RPC; the remote run must return 'passed'.
+queue_master_location_policy_validation(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, queue_master_location_policy_validation1, [Config]),
+    passed = Result.
+
+%% Remote body of queue_master_location_policy_validation/1: only the
+%% three known queue-master-locator strategies are accepted.
+queue_master_location_policy_validation1(_Config) ->
+    PolicyName = "queue_master_location_policy_validation-policy",
+    Set  = fun (JSON) ->
+                   control_action_opts(
+                     ["set_policy", PolicyName, ".*", JSON])
+           end,
+    OK   = fun (JSON, Def) ->
+               ok = Set(JSON),
+               true = does_policy_exist(PolicyName, [{definition, Def}])
+           end,
+    Fail = fun (JSON) -> {error_string, _} = Set(JSON) end,
+
+    OK  ("{\"queue-master-locator\":\"min-masters\"}",
+      [{<<"queue-master-locator">>, <<"min-masters">>}]),
+    OK  ("{\"queue-master-locator\":\"client-local\"}",
+      [{<<"queue-master-locator">>, <<"client-local">>}]),
+    OK  ("{\"queue-master-locator\":\"random\"}",
+      [{<<"queue-master-locator">>, <<"random">>}]),
+    Fail("{\"queue-master-locator\":\"made_up\"}"),
+
+    ok = control_action(clear_policy, [PolicyName]),
+    passed.
+
+%% Delegates to queue_modes_policy_validation1/1 on broker node 0 via
+%% RPC; the remote run must return 'passed'.
+queue_modes_policy_validation(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, queue_modes_policy_validation1, [Config]),
+    passed = Result.
+
+%% Remote body of queue_modes_policy_validation/1: only the "lazy" and
+%% "default" queue modes are valid policy values.
+queue_modes_policy_validation1(_Config) ->
+    PolicyName = "queue_modes_policy_validation-policy",
+    Set  = fun (JSON) ->
+                   control_action_opts(
+                     ["set_policy", PolicyName, ".*", JSON])
+           end,
+    OK   = fun (JSON, Def) ->
+               ok = Set(JSON),
+               true = does_policy_exist(PolicyName, [{definition, Def}])
+           end,
+    Fail = fun (JSON) -> {error_string, _} = Set(JSON) end,
+
+    OK  ("{\"queue-mode\":\"lazy\"}",
+      [{<<"queue-mode">>, <<"lazy">>}]),
+    OK  ("{\"queue-mode\":\"default\"}",
+      [{<<"queue-mode">>, <<"default">>}]),
+    Fail("{\"queue-mode\":\"wrong\"}"),
+
+    ok = control_action(clear_policy, [PolicyName]),
+    passed.
+
+%% Delegates to vhost_removed_while_updating_policy1/1 on broker node 0
+%% via RPC; the remote run must return 'passed'.
+vhost_removed_while_updating_policy(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, vhost_removed_while_updating_policy1, [Config]),
+    passed = Result.
+
+%% Remote body of vhost_removed_while_updating_policy/1: deleting a
+%% vhost that carries a policy must succeed and remove the policy.
+vhost_removed_while_updating_policy1(_Config) ->
+    VHost = "/vhost_removed_while_updating_policy-vhost",
+    PolicyName = "vhost_removed_while_updating_policy-policy",
+
+    ok = control_action(add_vhost, [VHost]),
+    ok = control_action_opts(
+      ["set_policy", "-p", VHost, PolicyName, ".*", "{\"ha-mode\":\"all\"}"]),
+    true = does_policy_exist(PolicyName, []),
+
+    %% Removing the vhost triggers the deletion of the policy. Once
+    %% the policy and the vhost are actually removed, RabbitMQ calls
+    %% update_policies() which lists policies on the given vhost. This
+    %% obviously fails because the vhost is gone, but the call should
+    %% still succeed.
+    ok = control_action(delete_vhost, [VHost]),
+    false = does_policy_exist(PolicyName, []),
+
+    passed.
+
+%% True iff exactly one policy named PolicyName exists and it carries
+%% every property in Props (see check_policy_props/2); false when the
+%% name is missing or ambiguous.
+does_policy_exist(PolicyName, Props) ->
+    WantedName = {name, list_to_binary(PolicyName)},
+    Matching = [P || P <- rabbit_policy:list(),
+                     lists:member(WantedName, P)],
+    case Matching of
+        [Policy] -> check_policy_props(Policy, Props);
+        _        -> false
+    end.
+
+%% True iff every element of Props appears verbatim in the Policy
+%% proplist; trivially true for an empty Props list.
+check_policy_props(Policy, Props) ->
+    lists:all(fun (Prop) -> lists:member(Prop, Policy) end, Props).
+
+%% Delegates to server_status1/1 on broker node 0 via RPC; the remote
+%% run must return 'passed'.
+server_status(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, server_status1, [Config]),
+    passed = Result.
+
+%% Remote body of server_status/1: smoke-tests the main status-reporting
+%% ctl commands (queues, exchanges, bindings, connections, channels,
+%% consumers), the vm memory high watermark setters, and eval.
+server_status1(Config) ->
+    %% create a few things so there is some useful information to list
+    {_Writer, Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(),
+    %% q1 has no owner (non-exclusive); q2 is owned by this process
+    %% (exclusive) so both values of the 'exclusive' info item appear.
+    [Q, Q2] = [Queue || {Name, Owner} <- [{<<"server_status-q1">>, none},
+                                          {<<"server_status-q2">>, self()}],
+                        {new, Queue = #amqqueue{}} <-
+                            [rabbit_amqqueue:declare(
+                               rabbit_misc:r(<<"/">>, queue, Name),
+                               false, false, [], Owner)]],
+    ok = rabbit_amqqueue:basic_consume(
+           Q, true, Ch, Limiter, false, 0, <<"ctag">>, true, [], undefined),
+
+    %% list queues
+    ok = info_action(list_queues,
+      rabbit_amqqueue:info_keys(), true),
+
+    %% as we have no way to collect output of
+    %% info_action/3 call, the only way we
+    %% can test individual queueinfoitems is by directly calling
+    %% rabbit_amqqueue:info/2
+    [{exclusive, false}] = rabbit_amqqueue:info(Q, [exclusive]),
+    [{exclusive, true}] = rabbit_amqqueue:info(Q2, [exclusive]),
+
+    %% list exchanges
+    ok = info_action(list_exchanges,
+      rabbit_exchange:info_keys(), true),
+
+    %% list bindings
+    ok = info_action(list_bindings,
+      rabbit_binding:info_keys(), true),
+    %% misc binding listing APIs
+    [_|_] = rabbit_binding:list_for_source(
+              rabbit_misc:r(<<"/">>, exchange, <<"">>)),
+    [_] = rabbit_binding:list_for_destination(
+            rabbit_misc:r(<<"/">>, queue, <<"server_status-q1">>)),
+    [_] = rabbit_binding:list_for_source_and_destination(
+            rabbit_misc:r(<<"/">>, exchange, <<"">>),
+            rabbit_misc:r(<<"/">>, queue, <<"server_status-q1">>)),
+
+    %% list connections
+    H = ?config(rmq_hostname, Config),
+    P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+    {ok, C} = gen_tcp:connect(H, P, []),
+    gen_tcp:send(C, <<"AMQP", 0, 0, 9, 1>>),
+    timer:sleep(100),
+    ok = info_action(list_connections,
+      rabbit_networking:connection_info_keys(), false),
+    %% close_connection
+    [ConnPid] = rabbit_ct_broker_helpers:get_connection_pids([C]),
+    ok = control_action(close_connection,
+      [rabbit_misc:pid_to_string(ConnPid), "go away"]),
+
+    %% list channels
+    ok = info_action(list_channels, rabbit_channel:info_keys(), false),
+
+    %% list consumers
+    ok = control_action(list_consumers, []),
+
+    %% set vm memory high watermark
+    HWM = vm_memory_monitor:get_vm_memory_high_watermark(),
+    ok = control_action(set_vm_memory_high_watermark, ["1"]),
+    ok = control_action(set_vm_memory_high_watermark, ["1.0"]),
+    %% this will trigger an alarm
+    ok = control_action(set_vm_memory_high_watermark, ["0.0"]),
+    %% reset
+    %% NOTE(review): assumes the saved watermark is a float —
+    %% float_to_list/1 would badarg on an integer; confirm
+    %% get_vm_memory_high_watermark/0 always returns a float.
+    ok = control_action(set_vm_memory_high_watermark, [float_to_list(HWM)]),
+
+    %% eval: malformed expressions are rejected, valid ones run
+    {error_string, _} = control_action(eval, ["\""]),
+    {error_string, _} = control_action(eval, ["a("]),
+    ok = control_action(eval, ["a."]),
+
+    %% cleanup
+    [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]],
+
+    unlink(Ch),
+    ok = rabbit_channel:shutdown(Ch),
+
+    passed.
+
+%% Delegates to amqp_connection_refusal1/1 on broker node 0 via RPC;
+%% the remote run must return 'passed'.
+amqp_connection_refusal(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, amqp_connection_refusal1, [Config]),
+    passed = Result.
+
+%% Remote body of amqp_connection_refusal/1: each malformed AMQP
+%% protocol header (bad version, bad magic) must be refused by the
+%% broker.
+amqp_connection_refusal1(Config) ->
+    Host = ?config(rmq_hostname, Config),
+    Port = rabbit_ct_broker_helpers:get_node_config(Config, 0,
+                                                    tcp_port_amqp),
+    BadHeaders = [<<"AMQP",9,9,9,9>>, <<"AMQP",0,1,0,0>>,
+                  <<"XXXX",0,0,9,1>>],
+    lists:foreach(
+      fun (Header) ->
+              passed = test_amqp_connection_refusal(Host, Port, Header)
+      end, BadHeaders),
+    passed.
+
+%% Send a bad protocol Header to the broker; it must answer with the
+%% AMQP 0-9-1 header it supports (per the AMQP version-negotiation
+%% rules) before closing the connection.
+test_amqp_connection_refusal(H, P, Header) ->
+    {ok, Sock} = gen_tcp:connect(H, P, [binary, {active, false}]),
+    ok = gen_tcp:send(Sock, Header),
+    {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(Sock, 8, 100),
+    ok = gen_tcp:close(Sock),
+    passed.
+
+%% Multiple consumers on a single channel must all be reported by
+%% `rabbitmqctl list_consumers`, both via the helper parser below and
+%% via direct raw-output parsing.
+list_consumers_sanity_check(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    Chan = rabbit_ct_client_helpers:open_channel(Config, A),
+    %% this queue is not cleaned up because the entire node is
+    %% reset between tests
+    QName = <<"list_consumers_q">>,
+    #'queue.declare_ok'{} = amqp_channel:call(Chan, #'queue.declare'{queue = QName}),
+
+    %% No consumers even if we have some queues
+    [] = rabbitmqctl_list_consumers(Config, A),
+
+    %% Several consumers on single channel should be correctly reported
+    #'basic.consume_ok'{consumer_tag = CTag1} = amqp_channel:call(Chan, #'basic.consume'{queue = QName}),
+    #'basic.consume_ok'{consumer_tag = CTag2} = amqp_channel:call(Chan, #'basic.consume'{queue = QName}),
+    true = (lists:sort([CTag1, CTag2]) =:=
+            lists:sort(rabbitmqctl_list_consumers(Config, A))),
+
+    %% Parse the raw ctl output directly as well (this path is shared
+    %% with `rabbitmqctl report`): rows for our queue must carry both
+    %% consumer tags in the third tab-separated column.
+    {ok, ReportStdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A,
+      ["list_consumers"]),
+    ReportLines = re:split(ReportStdOut, <<"\n">>, [trim]),
+    ReportCTags = [lists:nth(3, re:split(Row, <<"\t">>)) || <<"list_consumers_q", _/binary>> = Row <- ReportLines],
+    true = (lists:sort([CTag1, CTag2]) =:=
+            lists:sort(ReportCTags)).
+
+%% Run `rabbitmqctl list_consumers` on Node and return the consumer
+%% tags: the third tab-separated column of every row after the
+%% "Listing consumers" banner line.
+rabbitmqctl_list_consumers(Config, Node) ->
+    {ok, StdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Node,
+      ["list_consumers"]),
+    [<<"Listing consumers", _/binary>> | ConsumerRows] =
+        re:split(StdOut, <<"\n">>, [trim]),
+    [lists:nth(3, re:split(Row, <<"\t">>)) || Row <- ConsumerRows].
+
+%% With durable queues declared on two nodes and node B stopped,
+%% `list_queues --online` must show only A's queues, `--offline` only
+%% B's, and a plain listing the union of both.
+list_queues_online_and_offline(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    %% Node B will be stopped
+    BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+    #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_1">>, durable = true}),
+    #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_2">>, durable = true}),
+    #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_1">>, durable = true}),
+    #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_2">>, durable = true}),
+
+    rabbit_ct_broker_helpers:rabbitmqctl(Config, B, ["stop"]),
+
+    GotUp = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+        ["list_queues", "--online", "name"])),
+    ExpectUp = [[<<"q_a_1">>], [<<"q_a_2">>]],
+    ExpectUp = GotUp,
+
+    GotDown = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+        ["list_queues", "--offline", "name"])),
+    ExpectDown = [[<<"q_b_1">>], [<<"q_b_2">>]],
+    ExpectDown = GotDown,
+
+    GotAll = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+        ["list_queues", "name"])),
+    ExpectAll = ExpectUp ++ ExpectDown,
+    ExpectAll = GotAll,
+
+    ok.
+
+%% -------------------------------------------------------------------
+%% Statistics.
+%% -------------------------------------------------------------------
+
+%% Delegates to channel_statistics1/1 on broker node 0 via RPC; the
+%% remote run must return 'passed'.
+channel_statistics(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, channel_statistics1, [Config]),
+    passed = Result.
+
+%% Remote body of channel_statistics/1: with fine-grained statistics
+%% enabled, channel_stats events must track per-queue, per-exchange and
+%% per-queue-exchange counters through publish, get and queue deletion.
+channel_statistics1(_Config) ->
+    application:set_env(rabbit, collect_statistics, fine),
+
+    %% ATM this just tests the queue / exchange stats in channels. That's
+    %% by far the most complex code though.
+
+    %% Set up a channel and queue
+    {_Writer, Ch} = test_spawn(),
+    rabbit_channel:do(Ch, #'queue.declare'{}),
+    QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
+            after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+            end,
+    QRes = rabbit_misc:r(<<"/">>, queue, QName),
+    X = rabbit_misc:r(<<"/">>, exchange, <<"">>),
+
+    dummy_event_receiver:start(self(), [node()], [channel_stats]),
+
+    %% Check stats empty
+    Event = test_ch_statistics_receive_event(Ch, fun (_) -> true end),
+    [] = proplists:get_value(channel_queue_stats, Event),
+    [] = proplists:get_value(channel_exchange_stats, Event),
+    [] = proplists:get_value(channel_queue_exchange_stats, Event),
+
+    %% Publish and get a message
+    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+                                           routing_key = QName},
+                      rabbit_basic:build_content(#'P_basic'{}, <<"">>)),
+    rabbit_channel:do(Ch, #'basic.get'{queue = QName}),
+
+    %% Check the stats reflect that
+    Event2 = test_ch_statistics_receive_event(
+               Ch,
+               fun (E) ->
+                       length(proplists:get_value(
+                                channel_queue_exchange_stats, E)) > 0
+               end),
+    [{QRes, [{get,1}]}] = proplists:get_value(channel_queue_stats,    Event2),
+    [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event2),
+    [{{QRes,X},[{publish,1}]}] =
+        proplists:get_value(channel_queue_exchange_stats, Event2),
+
+    %% Check the stats remove stuff on queue deletion
+    rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
+    Event3 = test_ch_statistics_receive_event(
+               Ch,
+               fun (E) ->
+                       length(proplists:get_value(
+                                channel_queue_exchange_stats, E)) == 0
+               end),
+
+    %% Only the exchange-level publish counter survives the deletion.
+    [] = proplists:get_value(channel_queue_stats, Event3),
+    [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event3),
+    [] = proplists:get_value(channel_queue_exchange_stats, Event3),
+
+    rabbit_channel:shutdown(Ch),
+    dummy_event_receiver:stop(),
+    passed.
+
+%% Ask channel Ch to emit a stats event (flushing first so prior work is
+%% reflected), then wait for a channel_stats event matching Matcher.
+test_ch_statistics_receive_event(Ch, Matcher) ->
+    rabbit_channel:flush(Ch),
+    Ch ! emit_stats,
+    test_ch_statistics_receive_event1(Ch, Matcher).
+
+%% Receive loop: discard channel_stats events until one whose props
+%% satisfy Matcher arrives (returning the props), or throw
+%% failed_to_receive_event after ?TIMEOUT.
+test_ch_statistics_receive_event1(Ch, Matcher) ->
+    receive #event{type = channel_stats, props = Props} ->
+            case Matcher(Props) of
+                true -> Props;
+                _    -> test_ch_statistics_receive_event1(Ch, Matcher)
+            end
+    after ?TIMEOUT -> throw(failed_to_receive_event)
+    end.
+
+%% CT entry point: run the actual test body inside broker node 0.
+head_message_timestamp_statistics(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE,
+                                          head_message_timestamp1, [Config]),
+    passed = Result.
+
+%% Checks the head_message_timestamp queue statistic: it must be ''
+%% while the queue is empty, and otherwise the timestamp property of
+%% the message currently at the head of the queue. Runs inside the
+%% broker node, driving a raw channel and observing queue_stats events.
+head_message_timestamp1(_Config) ->
+    %% Can't find a way to receive the ack here so can't test pending acks status
+
+    application:set_env(rabbit, collect_statistics, fine),
+
+    %% Set up a channel and queue
+    {_Writer, Ch} = test_spawn(),
+    rabbit_channel:do(Ch, #'queue.declare'{}),
+    QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
+            after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+            end,
+    QRes = rabbit_misc:r(<<"/">>, queue, QName),
+
+    %% We need the queue's pid to match queue_stats events against it.
+    {ok, Q1} = rabbit_amqqueue:lookup(QRes),
+    QPid = Q1#amqqueue.pid,
+
+    %% Set up event receiver for queue
+    dummy_event_receiver:start(self(), [node()], [queue_stats]),
+
+    %% Check timestamp is empty when queue is empty
+    Event1 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end),
+    '' = proplists:get_value(head_message_timestamp, Event1),
+
+    %% Publish two messages and check timestamp is that of first message
+    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+                                           routing_key = QName},
+                      rabbit_basic:build_content(#'P_basic'{timestamp = 1}, <<"">>)),
+    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+                                           routing_key = QName},
+                      rabbit_basic:build_content(#'P_basic'{timestamp = 2}, <<"">>)),
+    Event2 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end),
+    1 = proplists:get_value(head_message_timestamp, Event2),
+
+    %% Get first message and check timestamp is that of second message
+    rabbit_channel:do(Ch, #'basic.get'{queue = QName, no_ack = true}),
+    Event3 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end),
+    2 = proplists:get_value(head_message_timestamp, Event3),
+
+    %% Get second message and check timestamp is empty again
+    rabbit_channel:do(Ch, #'basic.get'{queue = QName, no_ack = true}),
+    Event4 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end),
+    '' = proplists:get_value(head_message_timestamp, Event4),
+
+    %% Teardown
+    rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
+    rabbit_channel:shutdown(Ch),
+    dummy_event_receiver:stop(),
+
+    passed.
+
+%% Wait for a queue_stats event (for queue process Q) that satisfies
+%% Matcher. NOTE(review): unlike channels no explicit emit_stats
+%% trigger is sent here (see the commented-out send) — presumably
+%% queues emit stats on their own timer; confirm against the queue
+%% process implementation.
+test_queue_statistics_receive_event(Q, Matcher) ->
+    %% Q ! emit_stats,
+    test_queue_statistics_receive_event1(Q, Matcher).
+
+%% Receive loop: skip queue_stats events until one whose props satisfy
+%% Matcher arrives (its props are returned); throws
+%% failed_to_receive_event after ?TIMEOUT.
+test_queue_statistics_receive_event1(Q, Matcher) ->
+    receive #event{type = queue_stats, props = Props} ->
+            case Matcher(Props) of
+                true -> Props;
+                _    -> test_queue_statistics_receive_event1(Q, Matcher)
+            end
+    after ?TIMEOUT -> throw(failed_to_receive_event)
+    end.
+
+%% Open a raw in-broker test channel (no real network connection) and
+%% complete the channel.open handshake. Returns {Writer, ChannelPid};
+%% throws if channel.open_ok does not arrive within ?TIMEOUT.
+test_spawn() ->
+    {Writer, _Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(),
+    ok = rabbit_channel:do(Ch, #'channel.open'{}),
+    receive #'channel.open_ok'{} -> ok
+    after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
+    end,
+    {Writer, Ch}.
+
+%% As test_spawn/0, but the channel (and its anchor process) live on
+%% the given remote Node; see test_spawn_remote/0.
+test_spawn(Node) ->
+    rpc:call(Node, ?MODULE, test_spawn_remote, []).
+
+%% Spawn an arbitrary long lived process, so we don't end up linking
+%% the channel to the short-lived process (RPC, here) spun up by the
+%% RPC server.
+%% Returns the {Writer, Ch} pair created on this node; the spawned
+%% anchor process stays alive (blocked in its receive) holding the
+%% link to the channel.
+test_spawn_remote() ->
+    RPC = self(),
+    spawn(fun () ->
+                  {Writer, Ch} = test_spawn(),
+                  RPC ! {Writer, Ch},
+                  link(Ch),
+                  receive
+                      _ -> ok
+                  end
+          end),
+    receive Res -> Res
+    after ?TIMEOUT  -> throw(failed_to_receive_result)
+    end.
+
+%% -------------------------------------------------------------------
+%% Topic matching.
+%% -------------------------------------------------------------------
+
+%% CT entry point: run the topic-matching test body on broker node 0.
+topic_matching(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE,
+                                          topic_matching1, [Config]),
+    passed = Result.
+
+%% Exercise rabbit_exchange_type_topic routing directly: create a
+%% topic exchange, install a battery of bindings ('*' and '#'
+%% wildcards, duplicate keys differing only in args), verify routing
+%% for a set of keys, remove some bindings and re-verify, then delete
+%% the exchange and check that nothing matches any more.
+topic_matching1(_Config) ->
+    XName = #resource{virtual_host = <<"/">>,
+                      kind = exchange,
+                      name = <<"topic_matching-exchange">>},
+    X0 = #exchange{name = XName, type = topic, durable = false,
+                   auto_delete = false, arguments = []},
+    X = rabbit_exchange_decorator:set(X0),
+    %% create
+    rabbit_exchange_type_topic:validate(X),
+    exchange_op_callback(X, create, []),
+
+    %% add some bindings
+    Bindings = [#binding{source = XName,
+                         key = list_to_binary(Key),
+                         destination = #resource{virtual_host = <<"/">>,
+                                                 kind = queue,
+                                                 name = list_to_binary(Q)},
+                         args = Args} ||
+                   {Key, Q, Args} <- [{"a.b.c",         "t1",  []},
+                                      {"a.*.c",         "t2",  []},
+                                      {"a.#.b",         "t3",  []},
+                                      {"a.b.b.c",       "t4",  []},
+                                      {"#",             "t5",  []},
+                                      {"#.#",           "t6",  []},
+                                      {"#.b",           "t7",  []},
+                                      {"*.*",           "t8",  []},
+                                      {"a.*",           "t9",  []},
+                                      {"*.b.c",         "t10", []},
+                                      {"a.#",           "t11", []},
+                                      {"a.#.#",         "t12", []},
+                                      {"b.b.c",         "t13", []},
+                                      {"a.b.b",         "t14", []},
+                                      {"a.b",           "t15", []},
+                                      {"b.c",           "t16", []},
+                                      {"",              "t17", []},
+                                      {"*.*.*",         "t18", []},
+                                      {"vodka.martini", "t19", []},
+                                      {"a.b.c",         "t20", []},
+                                      {"*.#",           "t21", []},
+                                      {"#.*.#",         "t22", []},
+                                      {"*.#.#",         "t23", []},
+                                      {"#.#.#",         "t24", []},
+                                      {"*",             "t25", []},
+                                      {"#.b.#",         "t26", []},
+                                      {"args-test",     "t27",
+                                       [{<<"foo">>, longstr, <<"bar">>}]},
+                                      {"args-test",     "t27", %% Note aliasing
+                                       [{<<"foo">>, longstr, <<"baz">>}]}]],
+    lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end,
+                  Bindings),
+
+    %% test some matches
+    test_topic_expect_match(
+      X, [{"a.b.c",               ["t1", "t2", "t5", "t6", "t10", "t11", "t12",
+                                   "t18", "t20", "t21", "t22", "t23", "t24",
+                                   "t26"]},
+          {"a.b",                 ["t3", "t5", "t6", "t7", "t8", "t9", "t11",
+                                   "t12", "t15", "t21", "t22", "t23", "t24",
+                                   "t26"]},
+          {"a.b.b",               ["t3", "t5", "t6", "t7", "t11", "t12", "t14",
+                                   "t18", "t21", "t22", "t23", "t24", "t26"]},
+          {"",                    ["t5", "t6", "t17", "t24"]},
+          {"b.c.c",               ["t5", "t6", "t18", "t21", "t22", "t23",
+                                   "t24", "t26"]},
+          {"a.a.a.a.a",           ["t5", "t6", "t11", "t12", "t21", "t22",
+                                   "t23", "t24"]},
+          {"vodka.gin",           ["t5", "t6", "t8", "t21", "t22", "t23",
+                                   "t24"]},
+          {"vodka.martini",       ["t5", "t6", "t8", "t19", "t21", "t22", "t23",
+                                   "t24"]},
+          {"b.b.c",               ["t5", "t6", "t10", "t13", "t18", "t21",
+                                   "t22", "t23", "t24", "t26"]},
+          {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]},
+          {"oneword",             ["t5", "t6", "t21", "t22", "t23", "t24",
+                                   "t25"]},
+          {"args-test",           ["t5", "t6", "t21", "t22", "t23", "t24",
+                                   "t25", "t27"]}]),
+    %% remove some bindings
+    RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings),
+                       lists:nth(11, Bindings), lists:nth(19, Bindings),
+                       lists:nth(21, Bindings), lists:nth(28, Bindings)],
+    exchange_op_callback(X, remove_bindings, [RemovedBindings]),
+    RemainingBindings = ordsets:to_list(
+                          ordsets:subtract(ordsets:from_list(Bindings),
+                                           ordsets:from_list(RemovedBindings))),
+
+    %% test some matches
+    test_topic_expect_match(
+      X,
+      [{"a.b.c",               ["t2", "t6", "t10", "t12", "t18", "t20", "t22",
+                                "t23", "t24", "t26"]},
+       {"a.b",                 ["t3", "t6", "t7", "t8", "t9", "t12", "t15",
+                                "t22", "t23", "t24", "t26"]},
+       {"a.b.b",               ["t3", "t6", "t7", "t12", "t14", "t18", "t22",
+                                "t23", "t24", "t26"]},
+       {"",                    ["t6", "t17", "t24"]},
+       {"b.c.c",               ["t6", "t18", "t22", "t23", "t24", "t26"]},
+       {"a.a.a.a.a",           ["t6", "t12", "t22", "t23", "t24"]},
+       {"vodka.gin",           ["t6", "t8", "t22", "t23", "t24"]},
+       {"vodka.martini",       ["t6", "t8", "t22", "t23", "t24"]},
+       {"b.b.c",               ["t6", "t10", "t13", "t18", "t22", "t23",
+                                "t24", "t26"]},
+       {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]},
+       {"oneword",             ["t6", "t22", "t23", "t24", "t25"]},
+       {"args-test",           ["t6", "t22", "t23", "t24", "t25", "t27"]}]),
+
+    %% remove the entire exchange
+    exchange_op_callback(X, delete, [RemainingBindings]),
+    %% none should match now
+    test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]),
+    passed.
+
+%% Drive exchange-type callback Fun both inside an mnesia transaction
+%% and again in the post-commit ('none') phase, mirroring how the
+%% broker invokes exchange callbacks for real operations.
+exchange_op_callback(X, Fun, Args) ->
+    CallbackArgs = [X | Args],
+    rabbit_misc:execute_mnesia_transaction(
+      fun () ->
+              rabbit_exchange:callback(X, Fun, transaction, CallbackArgs)
+      end),
+    rabbit_exchange:callback(X, Fun, none, CallbackArgs).
+
+%% For each {RoutingKey, ExpectedQueueNames} pair, route a message
+%% through topic exchange X and assert the resulting destinations are
+%% exactly the expected queues (order-insensitive via usort).
+test_topic_expect_match(X, List) ->
+    lists:foreach(
+      fun ({Key, Expected}) ->
+              BinKey = list_to_binary(Key),
+              Message = rabbit_basic:message(X#exchange.name, BinKey,
+                                             #'P_basic'{}, <<>>),
+              Res = rabbit_exchange_type_topic:route(
+                      X, #delivery{mandatory = false,
+                                   sender    = self(),
+                                   message   = Message}),
+              ExpectedRes = lists:map(
+                              fun (Q) -> #resource{virtual_host = <<"/">>,
+                                                   kind = queue,
+                                                   name = list_to_binary(Q)}
+                              end, Expected),
+              true = (lists:usort(ExpectedRes) =:= lists:usort(Res))
+      end, List).
+
+%% ---------------------------------------------------------------------------
+%% Unordered tests (originally from rabbit_tests.erl).
+%% ---------------------------------------------------------------------------
+
+%% CT entry point: run the publisher-confirms test body on node 0.
+confirms(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE,
+                                          confirms1, [Config]),
+    passed = Result.
+
+%% Publisher-confirms semantics across a queue crash: a persistent
+%% message routed to two durable queues must be nack'd (never ack'd)
+%% when one of those queues dies before confirming, and no spurious
+%% ack may follow afterwards.
+confirms1(_Config) ->
+    {_Writer, Ch} = test_spawn(),
+    DeclareBindDurableQueue =
+        fun() ->
+                rabbit_channel:do(Ch, #'queue.declare'{durable = true}),
+                receive #'queue.declare_ok'{queue = Q0} ->
+                        rabbit_channel:do(Ch, #'queue.bind'{
+                                                 queue = Q0,
+                                                 exchange = <<"amq.direct">>,
+                                                 routing_key = "confirms-magic" }),
+                        receive #'queue.bind_ok'{} -> Q0
+                        after ?TIMEOUT -> throw(failed_to_bind_queue)
+                        end
+                after ?TIMEOUT -> throw(failed_to_declare_queue)
+                end
+        end,
+    %% Declare and bind two queues
+    QName1 = DeclareBindDurableQueue(),
+    QName2 = DeclareBindDurableQueue(),
+    %% Get the first one's pid (we'll crash it later)
+    {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)),
+    QPid1 = Q1#amqqueue.pid,
+    %% Enable confirms
+    rabbit_channel:do(Ch, #'confirm.select'{}),
+    receive
+        #'confirm.select_ok'{} -> ok
+    after ?TIMEOUT -> throw(failed_to_enable_confirms)
+    end,
+    %% Publish a message (delivery_mode 2 = persistent; the routing key
+    %% is the same plain-string term used in the bind above)
+    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>,
+                                           routing_key = "confirms-magic"
+                                          },
+                      rabbit_basic:build_content(
+                        #'P_basic'{delivery_mode = 2}, <<"">>)),
+    %% We must not kill the queue before the channel has processed the
+    %% 'publish'.
+    ok = rabbit_channel:flush(Ch),
+    %% Crash the queue
+    QPid1 ! boom,
+    %% Wait for a nack
+    receive
+        #'basic.nack'{} -> ok;
+        #'basic.ack'{}  -> throw(received_ack_instead_of_nack)
+    after ?TIMEOUT-> throw(did_not_receive_nack)
+    end,
+    %% No ack may arrive after the nack either
+    receive
+        #'basic.ack'{} -> throw(received_ack_when_none_expected)
+    after 1000 -> ok
+    end,
+    %% Cleanup
+    rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}),
+    receive
+        #'queue.delete_ok'{} -> ok
+    after ?TIMEOUT -> throw(failed_to_cleanup_queue)
+    end,
+    unlink(Ch),
+    ok = rabbit_channel:shutdown(Ch),
+
+    passed.
+
+%% CT entry point: run the gen_server2:with_state/2 check on node 0.
+gen_server2_with_state(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE,
+                                          gen_server2_with_state1, [Config]),
+    passed = Result.
+
+%% gen_server2:with_state/2 must expose the server's internal state to
+%% the supplied fun; the file_handle_cache state record is tagged
+%% 'fhc_state'.
+gen_server2_with_state1(_Config) ->
+    GetRecordTag = fun (State) -> element(1, State) end,
+    fhc_state = gen_server2:with_state(file_handle_cache, GetRecordTag),
+    passed.
+
+%% CT entry point: run the gen_server2:mcall/1 test body on node 0.
+mcall(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE,
+                                          mcall1, [Config]),
+    passed = Result.
+
+%% gen_server2:mcall/1 against a mix of targets — live listeners, a
+%% pre-dead pid, crash-on-first-use processes, unregistered names, an
+%% unreachable node and global names: good targets must appear in the
+%% reply list, every bad target in the error list with the expected
+%% reason.
+mcall1(_Config) ->
+    P1 = spawn(fun gs2_test_listener/0),
+    register(foo, P1),
+    global:register_name(gfoo, P1),
+
+    P2 = spawn(fun() -> exit(bang) end),
+    %% ensure P2 is dead (ignore the race setting up the monitor)
+    await_exit(P2),
+
+    P3 = spawn(fun gs2_test_crasher/0),
+
+    %% since P2 crashes almost immediately and P3 after receiving its first
+    %% message, we have to spawn a few more processes to handle the additional
+    %% cases we're interested in here
+    register(baz, spawn(fun gs2_test_crasher/0)),
+    register(bog, spawn(fun gs2_test_crasher/0)),
+    global:register_name(gbaz, spawn(fun gs2_test_crasher/0)),
+
+    NoNode = rabbit_nodes:make("nonode"),
+
+    Targets =
+        %% pids
+        [P1, P2, P3]
+        ++
+        %% registered names
+        [foo, bar, baz]
+        ++
+        %% {Name, Node} pairs
+        [{foo, node()}, {bar, node()}, {bog, node()}, {foo, NoNode}]
+        ++
+        %% {global, Name}
+        [{global, gfoo}, {global, gbar}, {global, gbaz}],
+
+    GoodResults = [{D, goodbye} || D <- [P1, foo,
+                                         {foo, node()},
+                                         {global, gfoo}]],
+
+    BadResults  = [{P2,             noproc},   % died before use
+                   {P3,             boom},     % died on first use
+                   {bar,            noproc},   % never registered
+                   {baz,            boom},     % died on first use
+                   {{bar, node()},  noproc},   % never registered
+                   {{bog, node()},  boom},     % died on first use
+                   {{foo, NoNode},  nodedown}, % invalid node
+                   {{global, gbar}, noproc},   % never registered globally
+                   {{global, gbaz}, boom}],    % died on first use
+
+    {Replies, Errors} = gen_server2:mcall([{T, hello} || T <- Targets]),
+    true = lists:sort(Replies) == lists:sort(GoodResults),
+    true = lists:sort(Errors)  == lists:sort(BadResults),
+
+    %% cleanup (ignore the race setting up the monitor)
+    P1 ! stop,
+    await_exit(P1),
+    passed.
+
+%% Block until Pid has terminated, for any reason. Monitoring works
+%% even if Pid is already dead (an immediate 'DOWN' is delivered).
+await_exit(Pid) ->
+    Ref = erlang:monitor(process, Pid),
+    receive
+        {'DOWN', Ref, process, _Object, _Reason} -> ok
+    end.
+
+%% Helper process body: exit with reason 'boom' on the first
+%% gen_server-style 'hello' call received.
+gs2_test_crasher() ->
+    receive
+        {'$gen_call', _From, hello} -> exit(boom)
+    end.
+
+%% Helper process body: reply 'goodbye' to every gen_server-style
+%% 'hello' call until a plain 'stop' message arrives.
+gs2_test_listener() ->
+    receive
+        {'$gen_call', From, hello} ->
+            gen_server2:reply(From, goodbye),
+            gs2_test_listener();
+        stop ->
+            ok
+    end.
+
+%% CT entry point: run the server-properties test body on node 0.
+configurable_server_properties(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE,
+                                          configurable_server_properties1,
+                                          [Config]),
+    passed = Result.
+
+%% server_properties must contain the built-in fields, accept new
+%% entries in both the simplified {AtomKey, String} form and the full
+%% {BinaryKey, Type, Value} form, and let a user-supplied property
+%% cleanly clobber a built-in (here: 'version').
+configurable_server_properties1(_Config) ->
+    %% Names of the built-in properties we expect to find
+    BuiltInPropNames = [<<"product">>, <<"version">>, <<"platform">>,
+                        <<"copyright">>, <<"information">>],
+
+    Protocol = rabbit_framing_amqp_0_9_1,
+
+    %% Verify that the built-in properties are initially present
+    ActualPropNames = [Key || {Key, longstr, _} <-
+                                  rabbit_reader:server_properties(Protocol)],
+    true = lists:all(fun (X) -> lists:member(X, ActualPropNames) end,
+                     BuiltInPropNames),
+
+    %% Get the initial server properties configured in the environment
+    {ok, ServerProperties} = application:get_env(rabbit, server_properties),
+
+    %% Helper functions
+    ConsProp = fun (X) -> application:set_env(rabbit,
+                                              server_properties,
+                                              [X | ServerProperties]) end,
+    IsPropPresent =
+        fun (X) ->
+                lists:member(X, rabbit_reader:server_properties(Protocol))
+        end,
+
+    %% Add a wholly new property of the simplified {KeyAtom, StringValue} form
+    NewSimplifiedProperty = {NewHareKey, NewHareVal} = {hare, "soup"},
+    ConsProp(NewSimplifiedProperty),
+    %% Do we find hare soup, appropriately formatted in the generated properties?
+    ExpectedHareImage = {list_to_binary(atom_to_list(NewHareKey)),
+                         longstr,
+                         list_to_binary(NewHareVal)},
+    true = IsPropPresent(ExpectedHareImage),
+
+    %% Add a wholly new property of the {BinaryKey, Type, Value} form
+    %% and check for it
+    NewProperty = {<<"new-bin-key">>, signedint, -1},
+    ConsProp(NewProperty),
+    %% Do we find the new property?
+    true = IsPropPresent(NewProperty),
+
+    %% Add a property that clobbers a built-in, and verify correct clobbering
+    {NewVerKey, NewVerVal} = NewVersion = {version, "X.Y.Z."},
+    {BinNewVerKey, BinNewVerVal} = {list_to_binary(atom_to_list(NewVerKey)),
+                                    list_to_binary(NewVerVal)},
+    ConsProp(NewVersion),
+    ClobberedServerProps = rabbit_reader:server_properties(Protocol),
+    %% Is the clobbering insert present?
+    true = IsPropPresent({BinNewVerKey, longstr, BinNewVerVal}),
+    %% Is the clobbering insert the only thing with the clobbering key?
+    [{BinNewVerKey, longstr, BinNewVerVal}] =
+        [E || {K, longstr, _V} = E <- ClobberedServerProps, K =:= BinNewVerKey],
+
+    %% Restore the original configuration for subsequent tests
+    application:set_env(rabbit, server_properties, ServerProperties),
+    passed.
+
+%% CT entry point: run the memory high-watermark test body on node 0.
+memory_high_watermark(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE,
+                                          memory_high_watermark1, [Config]),
+    passed = Result.
+
+%% Setting an absolute vm_memory_high_watermark of 2000 bytes must
+%% raise a memory resource alarm; the original watermark is restored
+%% afterwards so later tests are unaffected.
+memory_high_watermark1(_Config) ->
+    %% set vm memory high watermark
+    HWM = vm_memory_monitor:get_vm_memory_high_watermark(),
+    %% this will trigger an alarm
+    ok = control_action(set_vm_memory_high_watermark,
+      ["absolute", "2000"]),
+    [{{resource_limit,memory,_},[]}] = rabbit_alarm:get_alarms(),
+    %% reset
+    ok = control_action(set_vm_memory_high_watermark,
+      [float_to_list(HWM)]),
+
+    passed.
+
+%% CT entry point: run the disk-free-limit command test on node 0.
+set_disk_free_limit_command(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE,
+                                          set_disk_free_limit_command1,
+                                          [Config]),
+    passed = Result.
+
+%% set_disk_free_limit must accept absolute values with units
+%% (2000kiB = 2048000 bytes) and mem_relative fractions. The relative
+%% result is only range-checked because total-memory readings jitter.
+set_disk_free_limit_command1(_Config) ->
+    ok = control_action(set_disk_free_limit,
+      ["2000kiB"]),
+    2048000 = rabbit_disk_monitor:get_disk_free_limit(),
+    ok = control_action(set_disk_free_limit,
+      ["mem_relative", "1.1"]),
+    ExpectedLimit = 1.1 * vm_memory_monitor:get_total_memory(),
+    % Total memory is unstable, so checking order
+    true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() < 1.2,
+    true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() > 0.98,
+    %% leave a sane absolute limit behind for subsequent tests
+    ok = control_action(set_disk_free_limit, ["50MB"]),
+    passed.
+
+%% CT entry point: run the disk-monitor robustness test on node 0.
+disk_monitor(Config) ->
+    Result = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE,
+                                          disk_monitor1, [Config]),
+    passed = Result.
+
+%% Regression for rabbitmq-server #91: the disk monitor must restart
+%% cleanly even when the external disk-space command yields
+%% unparseable output (mocked here as a bare newline).
+disk_monitor1(_Config) ->
+    %% Issue: rabbitmq-server #91
+    %% os module could be mocked using 'unstick', however it may have undesired
+    %% side effects in following tests. Thus, we mock at rabbit_misc level
+    ok = meck:new(rabbit_misc, [passthrough]),
+    ok = meck:expect(rabbit_misc, os_cmd, fun(_) -> "\n" end),
+    ok = rabbit_sup:stop_child(rabbit_disk_monitor_sup),
+    ok = rabbit_sup:start_delayed_restartable_child(rabbit_disk_monitor, [1000]),
+    meck:unload(rabbit_misc),
+    passed.
+
+%% A connection blocked by a memory alarm cannot complete a clean
+%% close; after the client side is forcibly torn down the broker must
+%% still detect the dead peer via heartbeats and drop the lingering
+%% blocked connection.
+disconnect_detected_during_alarm(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+    %% Set a low memory high watermark.
+    rabbit_ct_broker_helpers:rabbitmqctl(Config, A,
+      ["set_vm_memory_high_watermark", "0.000000001"]),
+
+    %% Open a connection and a channel.
+    Port = rabbit_ct_broker_helpers:get_node_config(Config, A, tcp_port_amqp),
+    Heartbeat = 1,
+    {ok, Conn} = amqp_connection:start(
+      #amqp_params_network{port = Port,
+                           heartbeat = Heartbeat}),
+    {ok, Ch} = amqp_connection:open_channel(Conn),
+
+    amqp_connection:register_blocked_handler(Conn, self()),
+    Publish = #'basic.publish'{routing_key = <<"nowhere-to-go">>},
+    amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}),
+    receive
+        % Check that connection was indeed blocked
+        #'connection.blocked'{} -> ok
+    after
+        1000 -> exit(connection_was_not_blocked)
+    end,
+
+    %% Connection is blocked, now we should forcefully kill it
+    {'EXIT', _} = (catch amqp_connection:close(Conn, 10)),
+
+    ListConnections =
+        fun() ->
+            rpc:call(A, rabbit_networking, connection_info_all, [])
+        end,
+
+    %% We've already disconnected, but the blocked connection should still linger on.
+    [SingleConn] = ListConnections(),
+    blocked = rabbit_misc:pget(state, SingleConn),
+
+    %% It should definitely go away after 2 heartbeat intervals.
+    timer:sleep(round(2.5 * 1000 * Heartbeat)),
+    [] = ListConnections(),
+
+    passed.
+
+%% ---------------------------------------------------------------------------
+%% Cluster-dependent tests.
+%% ---------------------------------------------------------------------------
+
+%% Cluster test entry point: resolve the (from, to) node pair from the
+%% test_direction config and run delegates_async1/2 on the 'from' node.
+delegates_async(Config) ->
+    {FromIdx, ToIdx} = ?config(test_direction, Config),
+    FromNode = rabbit_ct_broker_helpers:get_node_config(Config, FromIdx,
+                                                        nodename),
+    ToNode = rabbit_ct_broker_helpers:get_node_config(Config, ToIdx,
+                                                      nodename),
+    rabbit_ct_broker_helpers:add_code_path_to_node(ToNode, ?MODULE),
+    Result = rabbit_ct_broker_helpers:rpc(Config, FromNode, ?MODULE,
+                                          delegates_async1, [Config, ToNode]),
+    passed = Result.
+
+%% delegate:invoke_no_result/2 (fire-and-forget) must reach both local
+%% and remote pids, singly and in bulk; each responder pings this
+%% process back so delivery can be counted via await_response/1.
+delegates_async1(_Config, SecondaryNode) ->
+    Self = self(),
+    Sender = fun (Pid) -> Pid ! {invoked, Self} end,
+
+    Responder = make_responder(fun ({invoked, Pid}) -> Pid ! response end),
+
+    %% single local and single remote target
+    ok = delegate:invoke_no_result(spawn(Responder), Sender),
+    ok = delegate:invoke_no_result(spawn(SecondaryNode, Responder), Sender),
+    await_response(2),
+
+    %% bulk invocation across both nodes
+    LocalPids = spawn_responders(node(), Responder, 10),
+    RemotePids = spawn_responders(SecondaryNode, Responder, 10),
+    ok = delegate:invoke_no_result(LocalPids ++ RemotePids, Sender),
+    await_response(20),
+
+    passed.
+
+%% Cluster test entry point: resolve the (from, to) node pair from the
+%% test_direction config and run delegates_sync1/2 on the 'from' node.
+delegates_sync(Config) ->
+    {FromIdx, ToIdx} = ?config(test_direction, Config),
+    FromNode = rabbit_ct_broker_helpers:get_node_config(Config, FromIdx,
+                                                        nodename),
+    ToNode = rabbit_ct_broker_helpers:get_node_config(Config, ToIdx,
+                                                      nodename),
+    rabbit_ct_broker_helpers:add_code_path_to_node(ToNode, ?MODULE),
+    Result = rabbit_ct_broker_helpers:rpc(Config, FromNode, ?MODULE,
+                                          delegates_sync1, [Config, ToNode]),
+    passed = Result.
+
+%% delegate:invoke/2 (synchronous) against local and remote pids:
+%% successful calls return {Pid, Result} pairs; senders that exit are
+%% collected in the error list; pids addressed at an unreachable node
+%% are reported with a nodedown exit.
+delegates_sync1(_Config, SecondaryNode) ->
+    Sender = fun (Pid) -> gen_server:call(Pid, invoked, infinity) end,
+    BadSender = fun (_Pid) -> exit(exception) end,
+
+    Responder = make_responder(fun ({'$gen_call', From, invoked}) ->
+                                       gen_server:reply(From, response)
+                               end),
+
+    BadResponder = make_responder(fun ({'$gen_call', From, invoked}) ->
+                                          gen_server:reply(From, response)
+                                  end, bad_responder_died),
+
+    %% single-target success, local and remote
+    response = delegate:invoke(spawn(Responder), Sender),
+    response = delegate:invoke(spawn(SecondaryNode, Responder), Sender),
+
+    %% a sender that exits must propagate as an exit to the caller
+    must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end),
+    must_exit(fun () ->
+                      delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end),
+
+    LocalGoodPids = spawn_responders(node(), Responder, 2),
+    RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2),
+    LocalBadPids = spawn_responders(node(), BadResponder, 2),
+    RemoteBadPids = spawn_responders(SecondaryNode, BadResponder, 2),
+
+    %% bulk success: every pid must appear exactly once in the results
+    {GoodRes, []} = delegate:invoke(LocalGoodPids ++ RemoteGoodPids, Sender),
+    true = lists:all(fun ({_, response}) -> true end, GoodRes),
+    GoodResPids = [Pid || {Pid, _} <- GoodRes],
+
+    Good = lists:usort(LocalGoodPids ++ RemoteGoodPids),
+    Good = lists:usort(GoodResPids),
+
+    %% bulk failure: every pid must appear in the error list
+    {[], BadRes} = delegate:invoke(LocalBadPids ++ RemoteBadPids, BadSender),
+    true = lists:all(fun ({_, {exit, exception, _}}) -> true end, BadRes),
+    BadResPids = [Pid || {Pid, _} <- BadRes],
+
+    Bad = lists:usort(LocalBadPids ++ RemoteBadPids),
+    Bad = lists:usort(BadResPids),
+
+    %% pids pointing at a node that does not exist -> nodedown errors
+    MagicalPids = [rabbit_misc:string_to_pid(Str) ||
+                      Str <- ["<nonode@nohost.0.1.0>", "<nonode@nohost.0.2.0>"]],
+    {[], BadNodes} = delegate:invoke(MagicalPids, Sender),
+    true = lists:all(
+             fun ({_, {exit, {nodedown, nonode@nohost}, _Stack}}) -> true end,
+             BadNodes),
+    BadNodesPids = [Pid || {Pid, _} <- BadNodes],
+
+    Magical = lists:usort(MagicalPids),
+    Magical = lists:usort(BadNodesPids),
+
+    passed.
+
+%% Cluster test entry point: resolve the (from, to) node pair from the
+%% test_direction config and run queue_cleanup1/2 on the 'from' node.
+queue_cleanup(Config) ->
+    {FromIdx, ToIdx} = ?config(test_direction, Config),
+    FromNode = rabbit_ct_broker_helpers:get_node_config(Config, FromIdx,
+                                                        nodename),
+    ToNode = rabbit_ct_broker_helpers:get_node_config(Config, ToIdx,
+                                                      nodename),
+    rabbit_ct_broker_helpers:add_code_path_to_node(ToNode, ?MODULE),
+    Result = rabbit_ct_broker_helpers:rpc(Config, FromNode, ?MODULE,
+                                          queue_cleanup1, [Config, ToNode]),
+    passed = Result.
+
+%% A transient (non-durable) queue must not survive a broker restart:
+%% redeclaring it passively afterwards must fail with NOT_FOUND.
+%% The secondary node is unused here (kept for the common cluster-test
+%% calling convention).
+queue_cleanup1(_Config, _SecondaryNode) ->
+    {_Writer, Ch} = test_spawn(),
+    rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }),
+    receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} ->
+            ok
+    after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+    end,
+    rabbit_channel:shutdown(Ch),
+    %% bounce the broker; the transient queue must vanish
+    rabbit:stop(),
+    rabbit:start(),
+    {_Writer2, Ch2} = test_spawn(),
+    rabbit_channel:do(Ch2, #'queue.declare'{ passive = true,
+                                             queue   = ?CLEANUP_QUEUE_NAME }),
+    receive
+        #'channel.close'{reply_code = ?NOT_FOUND} ->
+            ok
+    after ?TIMEOUT -> throw(failed_to_receive_channel_exit)
+    end,
+    rabbit_channel:shutdown(Ch2),
+    passed.
+
+%% Cluster test entry point: resolve the (from, to) node pair from the
+%% test_direction config and run declare_on_dead_queue1/2 on 'from'.
+declare_on_dead_queue(Config) ->
+    {FromIdx, ToIdx} = ?config(test_direction, Config),
+    FromNode = rabbit_ct_broker_helpers:get_node_config(Config, FromIdx,
+                                                        nodename),
+    ToNode = rabbit_ct_broker_helpers:get_node_config(Config, ToIdx,
+                                                      nodename),
+    rabbit_ct_broker_helpers:add_code_path_to_node(ToNode, ?MODULE),
+    Result = rabbit_ct_broker_helpers:rpc(Config, FromNode, ?MODULE,
+                                          declare_on_dead_queue1,
+                                          [Config, ToNode]),
+    passed = Result.
+
+%% Declare a queue from the secondary node, kill its process, then
+%% keep re-declaring (dead_queue_loop/2) until a fresh, live pid takes
+%% over, and finally delete the queue.
+declare_on_dead_queue1(_Config, SecondaryNode) ->
+    QueueName = rabbit_misc:r(<<"/">>, queue, ?CLEANUP_QUEUE_NAME),
+    Self = self(),
+    Pid = spawn(SecondaryNode,
+                fun () ->
+                        {new, #amqqueue{name = QueueName, pid = QPid}} =
+                            rabbit_amqqueue:declare(QueueName, false, false, [],
+                                                    none),
+                        exit(QPid, kill),
+                        Self ! {self(), killed, QPid}
+                end),
+    receive
+        {Pid, killed, OldPid} ->
+            Q = dead_queue_loop(QueueName, OldPid),
+            {ok, 0} = rabbit_amqqueue:delete(Q, false, false),
+            passed
+    after ?TIMEOUT -> throw(failed_to_create_and_kill_queue)
+    end.
+
+%% Cluster test entry point: resolve the (from, to) node pair from the
+%% test_direction config and run refresh_events1/2 on the 'from' node.
+refresh_events(Config) ->
+    {FromIdx, ToIdx} = ?config(test_direction, Config),
+    FromNode = rabbit_ct_broker_helpers:get_node_config(Config, FromIdx,
+                                                        nodename),
+    ToNode = rabbit_ct_broker_helpers:get_node_config(Config, ToIdx,
+                                                      nodename),
+    rabbit_ct_broker_helpers:add_code_path_to_node(ToNode, ?MODULE),
+    Result = rabbit_ct_broker_helpers:rpc(Config, FromNode, ?MODULE,
+                                          refresh_events1, [Config, ToNode]),
+    passed = Result.
+
+%% rabbit:force_event_refresh/1 must re-emit creation events
+%% (channel_created, queue_created) for entities that already exist,
+%% on both the local and the secondary node (see expect_events/3).
+refresh_events1(Config, SecondaryNode) ->
+    dummy_event_receiver:start(self(), [node(), SecondaryNode],
+                               [channel_created, queue_created]),
+
+    %% local channel: created event seen once, then again on refresh
+    {_Writer, Ch} = test_spawn(),
+    expect_events(pid, Ch, channel_created),
+    rabbit_channel:shutdown(Ch),
+
+    %% remote channel, same expectation
+    {_Writer2, Ch2} = test_spawn(SecondaryNode),
+    expect_events(pid, Ch2, channel_created),
+    rabbit_channel:shutdown(Ch2),
+
+    %% queue: matched by name rather than pid
+    {new, #amqqueue{name = QName} = Q} =
+        rabbit_amqqueue:declare(queue_name(Config, <<"refresh_events-q">>),
+                                false, false, [], none),
+    expect_events(name, QName, queue_created),
+    rabbit_amqqueue:delete(Q, false, false),
+
+    dummy_event_receiver:stop(),
+    passed.
+
+%% Build a fun suitable for spawn/1,2 that waits for exactly one
+%% message and hands it to Handler; throws Throw (default 'timeout')
+%% if nothing arrives within ?TIMEOUT.
+make_responder(Handler) -> make_responder(Handler, timeout).
+make_responder(Handler, Throw) ->
+    fun () ->
+            receive Message -> Handler(Message)
+            after ?TIMEOUT -> throw(Throw)
+            end
+    end.
+
+%% Spawn Count copies of Responder on Node; return their pids.
+spawn_responders(Node, Responder, Count) ->
+    lists:map(fun (_N) -> spawn(Node, Responder) end, lists:seq(1, Count)).
+
+%% Collect Count 'response' messages; throws 'timeout' if any of them
+%% fails to arrive within ?TIMEOUT.
+await_response(0) ->
+    ok;
+await_response(Count) ->
+    receive
+        response -> ok,
+                    await_response(Count - 1)
+    after ?TIMEOUT -> throw(timeout)
+    end.
+
+%% Assert that Fun() exits (any exit reason); throws 'exit_not_thrown'
+%% if Fun returns normally. Only the 'exit' class is caught — errors
+%% and throws from Fun propagate.
+must_exit(Fun) ->
+    try
+        Fun(),
+        throw(exit_not_thrown)
+    catch
+        exit:_ -> ok
+    end.
+
+%% Poll (25 ms steps) by re-declaring QueueName until the returned queue
+%% record carries a pid different from OldPid, i.e. the queue process has
+%% been restarted; assert the new pid is alive and return the queue record.
+dead_queue_loop(QueueName, OldPid) ->
+    {existing, Q} = rabbit_amqqueue:declare(QueueName, false, false, [], none),
+    case Q#amqqueue.pid of
+        OldPid -> timer:sleep(25),
+                  dead_queue_loop(QueueName, OldPid);
+        _      -> true = rabbit_misc:is_process_alive(Q#amqqueue.pid),
+                  Q
+    end.
+
+%% Expect the event once, force a broker-wide event refresh, then expect
+%% it again — verifying that force_event_refresh/1 re-emits the event.
+expect_events(Tag, Key, Type) ->
+    expect_event(Tag, Key, Type),
+    rabbit:force_event_refresh(make_ref()),
+    expect_event(Tag, Key, Type).
+
+%% Receive #event{} records of the given Type, skipping any whose Tag
+%% property does not equal Key (events for other channels/queues may be
+%% interleaved); throw after ?TIMEOUT without a match.
+expect_event(Tag, Key, Type) ->
+    receive #event{type = Type, props = Props} ->
+            case rabbit_misc:pget(Tag, Props) of
+                Key -> ok;
+                _   -> expect_event(Tag, Key, Type)
+            end
+    after ?TIMEOUT -> throw({failed_to_receive_event, Type})
+    end.
+
+%% ---------------------------------------------------------------------------
+%% rabbitmqctl helpers.
+%% ---------------------------------------------------------------------------
+
+%% Run a rabbitmqctl command in-process against the local node with the
+%% default options.
+control_action(Command, Args) ->
+    control_action(Command, node(), Args, default_options()).
+
+%% As above, but with caller-supplied options merged over the defaults.
+control_action(Command, Args, NewOpts) ->
+    control_action(Command, node(), Args,
+                   expand_options(default_options(), NewOpts)).
+
+%% Core variant: invoke rabbit_control_main:action/5, printing progress
+%% lines as the command runs.  Returns ok on success; on any other result
+%% (including exits/throws converted by 'catch') the value is printed and
+%% returned verbatim so callers can pattern-match on it.
+control_action(Command, Node, Args, Opts) ->
+    case catch rabbit_control_main:action(
+                 Command, Node, Args, Opts,
+                 fun (Format, Args1) ->
+                         io:format(Format ++ " ...~n", Args1)
+                 end) of
+        ok ->
+            io:format("done.~n"),
+            ok;
+        {ok, Result} ->
+            rabbit_control_misc:print_cmd_result(Command, Result),
+            ok;
+        Other ->
+            io:format("failed: ~p~n", [Other]),
+            Other
+    end.
+
+%% Timeout-aware variants of control_action/2,3,4: identical behaviour,
+%% but the command is run via rabbit_control_main:action/6 with an
+%% explicit Timeout (for commands that accept one, e.g. list_queues).
+control_action_t(Command, Args, Timeout) when is_number(Timeout) ->
+    control_action_t(Command, node(), Args, default_options(), Timeout).
+
+control_action_t(Command, Args, NewOpts, Timeout) when is_number(Timeout) ->
+    control_action_t(Command, node(), Args,
+                     expand_options(default_options(), NewOpts),
+                     Timeout).
+
+%% Core variant; see control_action/4 for the result conventions.
+control_action_t(Command, Node, Args, Opts, Timeout) when is_number(Timeout) ->
+    case catch rabbit_control_main:action(
+                 Command, Node, Args, Opts,
+                 fun (Format, Args1) ->
+                         io:format(Format ++ " ...~n", Args1)
+                 end, Timeout) of
+        ok ->
+            io:format("done.~n"),
+            ok;
+        {ok, Result} ->
+            rabbit_control_misc:print_cmd_result(Command, Result),
+            ok;
+        Other ->
+            io:format("failed: ~p~n", [Other]),
+            Other
+    end.
+
+%% Parse a raw rabbitmqctl-style argument list and, when parsing succeeds,
+%% run the resulting command against the local node; otherwise return the
+%% parse error unchanged.  (control_action/4's result is passed through
+%% as-is in either case.)
+control_action_opts(Raw) ->
+    NodeStr = atom_to_list(node()),
+    case rabbit_control_main:parse_arguments(Raw, NodeStr) of
+        {ok, {Cmd, Opts, Args}} -> control_action(Cmd, node(), Args, Opts);
+        Error                   -> Error
+    end.
+
+%% Exercise an info-style command: with no column arguments, optionally
+%% with an explicit vhost, then with every column named in Args, and
+%% finally verify an unknown column is rejected with {bad_argument, dummy}.
+info_action(Command, Args, CheckVHost) ->
+    ok = control_action(Command, []),
+    if CheckVHost -> ok = control_action(Command, [], ["-p", "/"]);
+       true       -> ok
+    end,
+    ok = control_action(Command, lists:map(fun atom_to_list/1, Args)),
+    {bad_argument, dummy} = control_action(Command, ["dummy"]),
+    ok.
+
+%% Timeout-aware counterpart of info_action/3.  NOTE(review): unlike
+%% info_action/3 this neither runs the bare no-args form first nor checks
+%% the bad-argument case — presumably intentional, worth confirming.
+info_action_t(Command, Args, CheckVHost, Timeout) when is_number(Timeout) ->
+    if CheckVHost -> ok = control_action_t(Command, [], ["-p", "/"], Timeout);
+       true       -> ok
+    end,
+    ok = control_action_t(Command, lists:map(fun atom_to_list/1, Args), Timeout),
+    ok.
+
+%% Baseline options for every control action: default vhost "/", quiet off.
+default_options() -> [{"-p", "/"}, {"-q", "false"}].
+
+%% Merge default options As into Bs: an option from As is prepended only
+%% when no option with the same key is already present in the accumulated
+%% result, so Bs (and earlier As entries) take precedence.
+expand_options(As, Bs) ->
+    lists:foldl(fun maybe_add_option/2, Bs, As).
+
+%% Prepend Opt to Acc unless Acc already defines an option with its key.
+maybe_add_option({Key, _} = Opt, Acc) ->
+    case proplists:is_defined(Key, Acc) of
+        true  -> Acc;
+        false -> [Opt | Acc]
+    end.
index 5178de139d326aedd33087e0c1f1ebf39bd63cb7..c0684077d8a087c0affb47afd9f1c4458cc6a4f9 100755 (executable)
@@ -5,12 +5,28 @@ set -o errexit
 set -o pipefail
 
 PACKAGE="${1:?Usage: $0 <package-name>}"
-rabbit_pid_path=/var/lib/rabbitmq/rabbit.pid
+
+# Should be in separate directory, init script is creating/removing this dir
+rabbit_pid_path=/var/lib/rabbitmq/rabbitmq-pid-dir-for-tests/rabbit.pid
+mkdir -p $(dirname $rabbit_pid_path)
+chmod a+rwX $(dirname $rabbit_pid_path)
+
+if command -v systemctl > /dev/null; then
+    HAS_SYSTEMD=true
+else
+    HAS_SYSTEMD=false
+fi
 
 if [[ `cat /etc/*-release | head -n 1 | awk '{print $1}'` =~ Ubuntu ]]; then
     OS=ubuntu
+    INSTALL_SHOULD_START=false
+    SHOULD_USE_HIPE=true
+    EPMD_UNIT=epmd
 else
     OS=redhat
+    INSTALL_SHOULD_START=false
+    SHOULD_USE_HIPE=false
+    EPMD_UNIT=epmd@0.0.0.0
 fi
 
 install-packages-ubuntu() {
@@ -61,6 +77,7 @@ enable-management-plugin() {
 }
 
 show-rabbit-startup-messages() {
+    set +e
     case $OS in
         ubuntu)
             echo ==== /var/log/rabbitmq/startup_log
@@ -70,8 +87,11 @@ show-rabbit-startup-messages() {
             ;;
         redhat)
             journalctl _SYSTEMD_UNIT=rabbitmq-server.service
+            systemctl status rabbitmq-server.service
+            journalctl -xe
             ;;
     esac
+    set -e
 }
 
 configure-rabbitmq-server() {
@@ -79,14 +99,31 @@ configure-rabbitmq-server() {
     cat <<EOF > /etc/rabbitmq/rabbitmq-env.conf
 PID_FILE=$rabbit_pid_path
 NODENAME=rabbit@localhost
+SERVER_CODE_PATH=/var/lib/rabbitmq/native-code
 EOF
 }
 
+# Post-mortem helper: dump directory listings and the tail of every rabbit
+# log file.  Failures are tolerated (set +e / || true) because the paths
+# may not exist when the broker never started.
+show-rabbit-logs() {
+    set +e
+    local log_file
+    ls -la /var/log/* || true
+    ls -la /var/lib/rabbitmq/* || true
+    ls -lad /tmp
+    for log_file in /var/log/rabbitmq/*.log; do
+        if [[ -e $log_file ]]; then
+            banner "Showing $log_file"
+            tail -n1000 $log_file
+        fi
+    done
+    set -e
+}
+
 start-rabbitmq-server() {
     pkill -f beam || true
     pkill -f epmd || true
     if ! start-service rabbitmq-server; then
         show-rabbit-startup-messages
+        show-rabbit-logs
         return 1
     fi
     wait-rabbitmq
@@ -130,8 +167,16 @@ rabbitmq-management-aliveness-test() {
     test "x$check_result" == xok
 }
 
-report-hipe-status() {
-    run-ctl eval "lists:module_info(native)."
+# Fail the run if this platform expects HiPE (SHOULD_USE_HIPE=true) but the
+# broker reports no natively-compiled modules.
+ensure-hipe-compiled() {
+    if [[ $SHOULD_USE_HIPE != true ]]; then
+        return 0
+    fi
+    local is_compiled
+    # NOTE(review): assumes `run-ctl eval` prints exactly "true" for a
+    # native-compiled module — confirm the output is not quoted or padded.
+    is_compiled=$(run-ctl eval "lists:module_info(native).")
+    if [[ $is_compiled != true ]]; then
+        echo "RabbitMQ is not using HiPE-compiled modules"
+        exit 1
+    fi
+}
 
 # rabbit may fail to notify systemd about successfull startup,
@@ -169,20 +214,122 @@ validate-erlang-thread-pool-size() {
     fi
 }
 
+# Our package should not start rabbit
+check-whether-rabbit-is-running-after-installation() {
+    local running_rc
+    set +e
+    pgrep -f beam > /dev/null
+    running_rc=$?
+    set -e
+
+    if [[ $INSTALL_SHOULD_START == true ]]; then
+        if [[ $running_rc -ne 0 ]]; then
+            echo "Installation process hasn't started rabbit, when it should have"
+            return 1
+        fi
+    else
+        if [[ $running_rc -eq 0 ]]; then
+            echo "Installation process has started rabbit, when it shoudn't have"
+            return 1
+        fi
+    fi
+}
+
+# Our package should not enable autostart of rabbti
+check-whether-rabbit-is-enabled-after-installation() {
+    local is_enabled=false
+    if command -v systemctl > /dev/null; then
+        if systemctl is-enabled rabbitmq-server > /dev/null 2>&1 ; then
+            is_enabled=true
+        fi
+    else
+        if [[ $(ls /etc/rc2.d/S*rabbitmq-server | wc -l) -ne 0 ]]; then
+            is_enabled=true
+        fi
+    fi
+    if [[ $INSTALL_SHOULD_START == true ]]; then
+        if [[ $is_enabled != true ]]; then
+            echo "Rabbitmq service wasn't enabled during installation, when it should"
+            return 1
+        fi
+    else
+        if [[ $is_enabled != false ]]; then
+            echo "Rabbit service was enabled during installation, when it shouldn't"
+        fi
+    fi
+}
+
+epmd-should-be-managed-by-systemd() {
+    if [[ $HAS_SYSTEMD != true ]]; then
+        return 0
+    fi
+    stop-service rabbitmq-server
+    systemctl stop epmd.socket
+    systemctl stop epmd.service
+    pkill -f epmd || true
+    start-service rabbitmq-server
+    if systemctl status rabitmq-server | grep -q epmd ; then
+        echo "epmd is running in same systemd unit as rabbitmq"
+        systemctl status
+        systemctl status $EPMD_UNIT
+        exit 1
+    fi
+    epmd-should-be-running-under-systemd
+}
+
+# Assert that the epmd systemd unit itself owns a running epmd process
+# (its "Main PID: <n> (epmd)" status line); on failure dump diagnostics
+# and abort the whole test run.
+epmd-should-be-running-under-systemd() {
+    if ! systemctl status $EPMD_UNIT | grep -q -P 'Main PID: \d+ \(epmd\)'; then
+        echo "epmd is not running as a standalone systemd unit, which is very strange"
+        systemctl status
+        systemctl status $EPMD_UNIT
+        exit 1
+    fi
+}
+
+postinst-started-epmd-should-be-managed-by-systemd() {
+    if [[ $HAS_SYSTEMD != true ]]; then
+        return 0
+    fi
+    # If installation procedure started epmd (because of
+    # HiPE-compilation patch), we need to verify that it was
+    # done through systemd.
+    if pgrep epmd; then
+        epmd-should-be-running-under-systemd
+    fi
+}
+
+banner() {
+    set +x
+    echo
+    echo ================================================================================
+    echo "$@"
+    echo ================================================================================
+    echo
+    set -x
+}
+
+step() {
+    banner "STEP: $@"
+    "$@"
+}
+
 case $PACKAGE in
     rabbitmq-server)
-        install-helper-packages
-        install-packages rabbitmq-server
-        stop-service rabbitmq-server
-        configure-rabbitmq-server
-        enable-management-plugin
-        start-rabbitmq-server
-        test-logrotate-sanity
-        rabbitmq-health-check
-        rabbitmq-management-aliveness-test
-        report-hipe-status
-        validate-erlang-thread-pool-size
-        test-repeated-restart
+        step install-helper-packages
+        step postinst-started-epmd-should-be-managed-by-systemd
+        step check-whether-rabbit-is-running-after-installation
+        step check-whether-rabbit-is-enabled-after-installation
+        step stop-service rabbitmq-server
+        step configure-rabbitmq-server
+        step enable-management-plugin
+        step start-rabbitmq-server
+        step test-logrotate-sanity
+        step rabbitmq-health-check
+        step rabbitmq-management-aliveness-test
+        step ensure-hipe-compiled
+        step validate-erlang-thread-pool-size
+        step test-repeated-restart
+        step epmd-should-be-managed-by-systemd
         ;;
     *)
         echo "test not defined, skipping...."